From 4763a457c158ecd1ee768ca7fe9d6c5e7a5a46ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Igor=20Pe=C4=8Dovnik?= Date: Wed, 10 Feb 2021 18:31:58 +0100 Subject: [PATCH 1/7] Add missing overclocking opp for N2+ (#2629) --- config/kernel/linux-meson64-current.config | 2 +- ...opp-values-for-clocking-g12b-N2-high.patch | 46 +++++++++++++++++++ ...opp-values-for-clocking-g12b-N2-high.patch | 46 +++++++++++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 patch/kernel/meson64-current/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch create mode 100644 patch/kernel/meson64-dev/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch diff --git a/config/kernel/linux-meson64-current.config b/config/kernel/linux-meson64-current.config index 6bc3e5a68..7ee3fbbb2 100644 --- a/config/kernel/linux-meson64-current.config +++ b/config/kernel/linux-meson64-current.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 5.10.6 Kernel Configuration +# Linux/arm64 5.10.10 Kernel Configuration # CONFIG_CC_VERSION_TEXT="aarch64-linux-gnu-gcc (GNU Toolchain for the A-profile Architecture 8.3-2019.03 (arm-rel-8.36)) 8.3.0" CONFIG_CC_IS_GCC=y diff --git a/patch/kernel/meson64-current/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch b/patch/kernel/meson64-current/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch new file mode 100644 index 000000000..d9f307131 --- /dev/null +++ b/patch/kernel/meson64-current/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch @@ -0,0 +1,46 @@ +From 712b399ed54f49e0ac7ae92c57ed775604eaaed9 Mon Sep 17 00:00:00 2001 +From: Igor Pecovnik +Date: Wed, 10 Feb 2021 18:07:08 +0100 +Subject: [PATCH] Add missing CPU opp values for clocking g12b / N2+ higher + +Signed-off-by: Igor Pecovnik +--- + .../arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi +index d61f43052..75030d197 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi +@@ -65,6 +65,14 @@ opp-1800000000 { + opp-hz = /bits/ 64 <1800000000>; + opp-microvolt = <1001000>; + }; ++ opp-1908000000 { ++ opp-hz = /bits/ 64 <1908000000>; ++ opp-microvolt = <1030000>; ++ }; ++ opp-2016000000 { ++ opp-hz = /bits/ 64 <2016000000>; ++ opp-microvolt = <1040000>; ++ }; + }; + + cpub_opp_table_1: opp-table-1 { +@@ -145,5 +153,13 @@ opp-2208000000 { + opp-hz = /bits/ 64 <2208000000>; + opp-microvolt = <1011000>; + }; ++ opp-2304000000 { ++ opp-hz = /bits/ 64 <2304000000>; ++ opp-microvolt = <1030000>; ++ }; ++ opp-2400000000 { ++ opp-hz = /bits/ 64 <2400000000>; ++ opp-microvolt = <1040000>; ++ }; + }; + }; +-- +Created with Armbian build tools https://github.com/armbian/build + diff --git a/patch/kernel/meson64-dev/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch b/patch/kernel/meson64-dev/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch new file mode 100644 index 000000000..d9f307131 --- /dev/null +++ b/patch/kernel/meson64-dev/0001-Add-missing-CPU-opp-values-for-clocking-g12b-N2-high.patch @@ -0,0 +1,46 @@ +From 712b399ed54f49e0ac7ae92c57ed775604eaaed9 Mon Sep 17 00:00:00 2001 +From: Igor Pecovnik +Date: Wed, 10 Feb 2021 18:07:08 +0100 +Subject: [PATCH] Add missing CPU opp values for clocking g12b / N2+ higher + +Signed-off-by: Igor Pecovnik +--- + 
.../arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi +index d61f43052..75030d197 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi +@@ -65,6 +65,14 @@ opp-1800000000 { + opp-hz = /bits/ 64 <1800000000>; + opp-microvolt = <1001000>; + }; ++ opp-1908000000 { ++ opp-hz = /bits/ 64 <1908000000>; ++ opp-microvolt = <1030000>; ++ }; ++ opp-2016000000 { ++ opp-hz = /bits/ 64 <2016000000>; ++ opp-microvolt = <1040000>; ++ }; + }; + + cpub_opp_table_1: opp-table-1 { +@@ -145,5 +153,13 @@ opp-2208000000 { + opp-hz = /bits/ 64 <2208000000>; + opp-microvolt = <1011000>; + }; ++ opp-2304000000 { ++ opp-hz = /bits/ 64 <2304000000>; ++ opp-microvolt = <1030000>; ++ }; ++ opp-2400000000 { ++ opp-hz = /bits/ 64 <2400000000>; ++ opp-microvolt = <1040000>; ++ }; + }; + }; +-- +Created with Armbian build tools https://github.com/armbian/build + From dc98eeb26454bcd36763c219afbb78fd2814900b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Igor=20Pe=C4=8Dovnik?= Date: Thu, 11 Feb 2021 00:11:44 +0100 Subject: [PATCH 2/7] Add upstream patches (#2630) --- config/kernel/linux-odroidxu4-current.config | 2 +- .../odroidxu4-current/patch-5.4.94-95.patch | 1747 +++++++++++++ .../odroidxu4-current/patch-5.4.95-96.patch | 1218 +++++++++ .../odroidxu4-current/patch-5.4.96-97.patch | 2250 +++++++++++++++++ 4 files changed, 5216 insertions(+), 1 deletion(-) create mode 100644 patch/kernel/odroidxu4-current/patch-5.4.94-95.patch create mode 100644 patch/kernel/odroidxu4-current/patch-5.4.95-96.patch create mode 100644 patch/kernel/odroidxu4-current/patch-5.4.96-97.patch diff --git a/config/kernel/linux-odroidxu4-current.config b/config/kernel/linux-odroidxu4-current.config index 275af67f4..7a52e063b 100644 --- a/config/kernel/linux-odroidxu4-current.config +++ b/config/kernel/linux-odroidxu4-current.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 5.4.94 Kernel Configuration +# Linux/arm 5.4.97 Kernel Configuration # # diff --git a/patch/kernel/odroidxu4-current/patch-5.4.94-95.patch b/patch/kernel/odroidxu4-current/patch-5.4.94-95.patch new file mode 100644 index 000000000..f6a36b55c --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.94-95.patch @@ -0,0 +1,1747 @@ +diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt +index a18e996fa54b0..7064efd3b5ea3 100644 +--- a/Documentation/virt/kvm/api.txt ++++ b/Documentation/virt/kvm/api.txt +@@ -1132,6 +1132,9 @@ field userspace_addr, which must point at user addressable memory for + the entire memory slot size. Any object may back this memory, including + anonymous memory, ordinary files, and hugetlbfs. + ++On architectures that support a form of address tagging, userspace_addr must ++be an untagged address. ++ + It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr + be identical. This allows large pages in the guest to be backed by large + pages in the host. 
+diff --git a/Makefile b/Makefile +index ad1b8dc6e462a..aa3c2e834442e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 94 ++SUBLEVEL = 95 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +index 1a9a9d98f2848..14d6fec50dee2 100644 +--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +@@ -273,7 +273,7 @@ + + /* VDD_AUD_1P8: Audio codec */ + reg_aud_1p8v: ldo3 { +- regulator-name = "vdd1p8"; ++ regulator-name = "vdd1p8a"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; +diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +index 6acc8591219a7..eea317b41020d 100644 +--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +@@ -167,7 +167,7 @@ + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; +- status = "disabld"; ++ status = "disabled"; + }; + + i2c_cam: i2c-gpio-cam { +@@ -179,7 +179,7 @@ + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; +- status = "disabld"; ++ status = "disabled"; + }; + }; + +diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S +index 1eabf2d2834be..e06f946b75b96 100644 +--- a/arch/arm/mach-imx/suspend-imx6.S ++++ b/arch/arm/mach-imx/suspend-imx6.S +@@ -67,6 +67,7 @@ + #define MX6Q_CCM_CCR 0x0 + + .align 3 ++ .arm + + .macro sync_l2_cache + +diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi +index aef8f2b00778d..5401a646c8406 100644 +--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi ++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi +@@ -4,11 +4,16 @@ + */ + usb { + compatible = "simple-bus"; +- dma-ranges; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>; + ++ /* ++ * Internally, USB bus to the interconnect can only address up ++ * to 40-bit ++ */ ++ dma-ranges = <0 0 0 0 0x100 0x0>; ++ + usbphy0: usb-phy@0 { + compatible = "brcm,sr-usb-combo-phy"; + reg = <0x0 0x00000000 0x0 0x100>; +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +index 795d6ca4bbd1f..bd99fa68b7630 100644 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +@@ -103,7 +103,7 @@ + reboot { + compatible ="syscon-reboot"; + regmap = <&rst>; +- offset = <0xb0>; ++ offset = <0>; + mask = <0x02>; + }; + +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 08e1e7544f823..e32e8bcf94553 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -5579,11 +5579,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, + if (is_guest_mode(vcpu)) { + sync_vmcs02_to_vmcs12(vcpu, vmcs12); + sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); +- } else if (!vmx->nested.need_vmcs12_to_shadow_sync) { +- if (vmx->nested.hv_evmcs) +- copy_enlightened_to_vmcs12(vmx); +- else if (enable_shadow_vmcs) +- copy_shadow_to_vmcs12(vmx); ++ } else { ++ copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); ++ if (!vmx->nested.need_vmcs12_to_shadow_sync) { ++ if (vmx->nested.hv_evmcs) ++ copy_enlightened_to_vmcs12(vmx); ++ else if (enable_shadow_vmcs) ++ 
copy_shadow_to_vmcs12(vmx); ++ } + } + + BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); +diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c +index f8998a7bc7d56..181e352d38de4 100644 +--- a/arch/x86/kvm/vmx/pmu_intel.c ++++ b/arch/x86/kvm/vmx/pmu_intel.c +@@ -26,7 +26,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = { + [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, + [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, +- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, ++ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, + }; + + /* mapping between fixed pmc index and intel_arch_events array */ +@@ -296,7 +296,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) + + pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, + x86_pmu.num_counters_gp); ++ eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp); + pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; ++ eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len); + pmu->available_event_types = ~entry->ebx & + ((1ull << eax.split.mask_length) - 1); + +@@ -306,6 +308,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) + pmu->nr_arch_fixed_counters = + min_t(int, edx.split.num_counters_fixed, + x86_pmu.num_counters_fixed); ++ edx.split.bit_width_fixed = min_t(int, ++ edx.split.bit_width_fixed, x86_pmu.bit_width_fixed); + pmu->counter_bitmask[KVM_PMC_FIXED] = + ((u64)1 << edx.split.bit_width_fixed) - 1; + } +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 72990c3c6faf7..73095d7213993 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -102,6 +102,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; + + static void update_cr8_intercept(struct kvm_vcpu *vcpu); + static void process_nmi(struct kvm_vcpu *vcpu); ++static void process_smi(struct kvm_vcpu *vcpu); + static void enter_smm(struct kvm_vcpu *vcpu); + static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); + static void store_regs(struct kvm_vcpu *vcpu); +@@ -3772,6 +3773,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, + { + process_nmi(vcpu); + ++ ++ if (kvm_check_request(KVM_REQ_SMI, vcpu)) ++ process_smi(vcpu); ++ + /* + * The API doesn't provide the instruction length for software + * exceptions, so don't report them. 
As long as the guest RIP +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c +index 96869f1538b93..bfca116482b8b 100644 +--- a/drivers/acpi/device_sysfs.c ++++ b/drivers/acpi/device_sysfs.c +@@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, + if (add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + +- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); +- if (len < 0) +- return len; +- +- env->buflen += len; +- if (!adev->data.of_compatible) +- return 0; +- +- if (len > 0 && add_uevent_var(env, "MODALIAS=")) +- return -ENOMEM; +- +- len = create_of_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); ++ if (adev->data.of_compatible) ++ len = create_of_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); ++ else ++ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); + if (len < 0) + return len; + +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index a3037fe54c3ab..f068bb5d650eb 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -1014,6 +1014,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, + if (!sock) + return err; + ++ /* ++ * We need to make sure we don't get any errant requests while we're ++ * reallocating the ->socks array. ++ */ ++ blk_mq_freeze_queue(nbd->disk->queue); ++ + if (!netlink && !nbd->task_setup && + !test_bit(NBD_RT_BOUND, &config->runtime_flags)) + nbd->task_setup = current; +@@ -1052,10 +1058,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, + nsock->cookie = 0; + socks[config->num_connections++] = nsock; + atomic_inc(&config->live_connections); ++ blk_mq_unfreeze_queue(nbd->disk->queue); + + return 0; + + put_socket: ++ blk_mq_unfreeze_queue(nbd->disk->queue); + sockfd_put(sock); + return err; + } +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index ab5482202cfb3..def41e1bd7364 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -936,7 +936,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info) + if (info->feature_discard) { + blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq); + blk_queue_max_discard_sectors(rq, get_capacity(gd)); +- rq->limits.discard_granularity = info->discard_granularity; ++ rq->limits.discard_granularity = info->discard_granularity ?: ++ info->physical_sector_size; + rq->limits.discard_alignment = info->discard_alignment; + if (info->feature_secdiscard) + blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq); +@@ -2169,19 +2170,12 @@ static void blkfront_closing(struct blkfront_info *info) + + static void blkfront_setup_discard(struct blkfront_info *info) + { +- int err; +- unsigned int discard_granularity; +- unsigned int discard_alignment; +- + info->feature_discard = 1; +- err = xenbus_gather(XBT_NIL, info->xbdev->otherend, +- "discard-granularity", "%u", &discard_granularity, +- "discard-alignment", "%u", &discard_alignment, +- NULL); +- if (!err) { +- info->discard_granularity = discard_granularity; +- info->discard_alignment = discard_alignment; +- } ++ info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend, ++ "discard-granularity", ++ 0); ++ info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend, ++ "discard-alignment", 0); + info->feature_secdiscard = + !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure", + 0); +diff --git a/drivers/firmware/imx/Kconfig 
b/drivers/firmware/imx/Kconfig +index 0dbee32da4c6d..5d995fe64b5ca 100644 +--- a/drivers/firmware/imx/Kconfig ++++ b/drivers/firmware/imx/Kconfig +@@ -13,6 +13,7 @@ config IMX_DSP + config IMX_SCU + bool "IMX SCU Protocol driver" + depends on IMX_MBOX ++ select SOC_BUS + help + The System Controller Firmware (SCFW) is a low-level system function + which runs on a dedicated Cortex-M core to provide power, clock, and +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 126a0eb6e0542..00335a1c02b0e 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -1894,7 +1894,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) + { + const unsigned int pi = __platform_mask_index(info, p); + +- return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; ++ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1); + } + + static __always_inline bool +diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c +index 0be4668c780bf..8556804e96efd 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_svm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c +@@ -306,6 +306,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data, + struct drm_nouveau_svm_init *args = data; + int ret; + ++ /* We need to fail if svm is disabled */ ++ if (!cli->drm->svm) ++ return -ENOSYS; ++ + /* Allocate tracking for SVM-enabled VMM. */ + if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) + return -ENOMEM; +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c +index 89ac2f9ae6dd8..e7472f0da59d2 100644 +--- a/drivers/infiniband/hw/cxgb4/qp.c ++++ b/drivers/infiniband/hw/cxgb4/qp.c +@@ -2471,7 +2471,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; + init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; + init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; +- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; ++ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; + init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; + init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; + return 0; +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 30ac0ba55864e..1b9795743276d 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -1020,8 +1020,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + { + struct intel_iommu *iommu; + u32 ver, sts; +- int agaw = 0; +- int msagaw = 0; ++ int agaw = -1; ++ int msagaw = -1; + int err; + + if (!drhd->reg_base_addr) { +@@ -1046,17 +1046,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + err = -EINVAL; +- agaw = iommu_calculate_agaw(iommu); +- if (agaw < 0) { +- pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ if (cap_sagaw(iommu->cap) == 0) { ++ pr_info("%s: No supported address widths. 
Not attempting DMA translation.\n", ++ iommu->name); ++ drhd->ignored = 1; + } +- msagaw = iommu_calculate_max_sagaw(iommu); +- if (msagaw < 0) { +- pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ ++ if (!drhd->ignored) { ++ agaw = iommu_calculate_agaw(iommu); ++ if (agaw < 0) { ++ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ } ++ } ++ if (!drhd->ignored) { ++ msagaw = iommu_calculate_max_sagaw(iommu); ++ if (msagaw < 0) { ++ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ agaw = -1; ++ } + } + iommu->agaw = agaw; + iommu->msagaw = msagaw; +@@ -1083,7 +1094,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + + raw_spin_lock_init(&iommu->register_lock); + +- if (intel_iommu_enabled) { ++ /* ++ * This is only for hotplug; at boot time intel_iommu_enabled won't ++ * be set yet. When intel_iommu_init() runs, it registers the units ++ * present at boot time, then sets intel_iommu_enabled. ++ */ ++ if (intel_iommu_enabled && !drhd->ignored) { + err = iommu_device_sysfs_add(&iommu->iommu, NULL, + intel_iommu_groups, + "%s", iommu->name); +@@ -1098,6 +1114,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + drhd->iommu = iommu; ++ iommu->drhd = drhd; + + return 0; + +@@ -1112,7 +1129,7 @@ error: + + static void free_iommu(struct intel_iommu *iommu) + { +- if (intel_iommu_enabled) { ++ if (intel_iommu_enabled && !iommu->drhd->ignored) { + iommu_device_unregister(&iommu->iommu); + iommu_device_sysfs_remove(&iommu->iommu); + } +diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c +index 23963e5cb5d6a..0d59763e40de1 100644 +--- a/drivers/leds/led-triggers.c ++++ b/drivers/leds/led-triggers.c +@@ -318,14 +318,15 @@ void led_trigger_event(struct led_trigger *trig, + enum led_brightness brightness) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) + led_set_brightness(led_cdev, brightness); +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags); + } + EXPORT_SYMBOL_GPL(led_trigger_event); + +@@ -336,11 +337,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + int invert) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) { + if (oneshot) + led_blink_set_oneshot(led_cdev, delay_on, delay_off, +@@ -348,7 +350,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + else + led_blink_set(led_cdev, delay_on, delay_off); + } +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags); + } + + void led_trigger_blink(struct led_trigger *trig, +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c +index e84f9dccf448a..c4d7e06974d2c 100644 +--- a/drivers/media/rc/rc-main.c ++++ b/drivers/media/rc/rc-main.c +@@ -1892,6 +1892,8 @@ int rc_register_device(struct rc_dev *dev) + goto out_raw; + } + ++ dev->registered = true; ++ + rc = device_add(&dev->dev); + if (rc) + goto out_rx_free; +@@ -1901,8 +1903,6 @@ int rc_register_device(struct rc_dev *dev) + dev->device_name ?: "Unspecified device", path ?: 
"N/A"); + kfree(path); + +- dev->registered = true; +- + /* + * once the the input device is registered in rc_setup_rx_device, + * userspace can open the input device and rc_open() will be called +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 247aeacb3a440..2ae9feb99a07d 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -1134,7 +1134,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); + struct can_ctrlmode cm = {.flags = priv->ctrlmode}; +- struct can_berr_counter bec; ++ struct can_berr_counter bec = { }; + enum can_state state = priv->state; + + if (priv->do_get_state) +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index c952212900fcf..c20dc689698ed 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -3980,20 +3980,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) + goto error_param; + + vf = &pf->vf[vf_id]; +- vsi = pf->vsi[vf->lan_vsi_idx]; + + /* When the VF is resetting wait until it is done. + * It can take up to 200 milliseconds, + * but wait for up to 300 milliseconds to be safe. +- * If the VF is indeed in reset, the vsi pointer has +- * to show on the newly loaded vsi under pf->vsi[id]. ++ * Acquire the VSI pointer only after the VF has been ++ * properly initialized. + */ + for (i = 0; i < 15; i++) { +- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { +- if (i > 0) +- vsi = pf->vsi[vf->lan_vsi_idx]; ++ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) + break; +- } + msleep(20); + } + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { +@@ -4002,6 +3998,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) + ret = -EAGAIN; + goto error_param; + } ++ vsi = pf->vsi[vf->lan_vsi_idx]; + + if (is_multicast_ether_addr(mac)) { + dev_err(&pf->pdev->dev, +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index ac98f1d968921..0303eeb760505 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -1670,12 +1670,18 @@ static int igc_get_link_ksettings(struct net_device *netdev, + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); +- ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); ++ if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); ++ if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); ++ if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); ++ if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); ++ if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); ++ if (hw->phy.autoneg_advertised & 
ADVERTISE_2500_FULL) ++ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { +@@ -1786,6 +1792,12 @@ static int igc_set_link_ksettings(struct net_device *netdev, + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); ++ /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. ++ * We have to check this and convert it to ADVERTISE_2500_FULL ++ * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. ++ */ ++ if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) ++ advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index ec117e4414250..6495c26d95969 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -57,6 +57,7 @@ + #include "lib/devcom.h" + #include "lib/geneve.h" + #include "diag/en_tc_tracepoint.h" ++#include + + struct mlx5_nic_flow_attr { + u32 action; +@@ -1837,8 +1838,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); +- netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", +- dissector->used_keys); ++ netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n", ++ dissector->used_keys); + return -EOPNOTSUPP; + } + +@@ -3943,13 +3944,13 @@ errout: + return err; + } + +-static int apply_police_params(struct mlx5e_priv *priv, u32 rate, ++static int apply_police_params(struct mlx5e_priv *priv, u64 rate, + struct netlink_ext_ack *extack) + { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch *esw; ++ u32 rate_mbps = 0; + u16 vport_num; +- u32 rate_mbps; + int err; + + vport_num = rpriv->rep->vport; +@@ -3966,7 +3967,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, + * Moreover, if rate is non zero we choose to configure to a minimum of + * 1 mbit/sec. + */ +- rate_mbps = rate ? 
max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; ++ if (rate) { ++ rate = (rate * BITS_PER_BYTE) + 500000; ++ rate_mbps = max_t(u32, do_div(rate, 1000000), 1); ++ } ++ + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + if (err) + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 2eceb72f0f647..4944c40436f08 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1068,6 +1068,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa + destroy_ft: + root->cmds->destroy_flow_table(root, ft); + free_ft: ++ rhltable_destroy(&ft->fgs_hash); + kfree(ft); + unlock_root: + mutex_unlock(&root->chain_lock); +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 606fee99221b8..0eb894b7c0bda 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -991,7 +991,8 @@ static void __team_compute_features(struct team *team) + unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM; + +- list_for_each_entry(port, &team->port_list, list) { ++ rcu_read_lock(); ++ list_for_each_entry_rcu(port, &team->port_list, list) { + vlan_features = netdev_increment_features(vlan_features, + port->dev->vlan_features, + TEAM_VLAN_FEATURES); +@@ -1005,6 +1006,7 @@ static void __team_compute_features(struct team *team) + if (port->dev->hard_header_len > max_hard_header_len) + max_hard_header_len = port->dev->hard_header_len; + } ++ rcu_read_unlock(); + + team->dev->vlan_features = vlan_features; + team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | +@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team) + + static void team_compute_features(struct team *team) + { +- mutex_lock(&team->lock); + __team_compute_features(team); +- mutex_unlock(&team->lock); + netdev_change_features(team->dev); + } + +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index b0d748a614a9e..72a3a5dc51319 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1347,6 +1347,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ + {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ ++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +index ef5a8ecabc60a..0581f082301e0 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +@@ -2183,7 +2183,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + + while (offs < dwords) { + /* limit the time we spin here under lock to 1/2s */ +- ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC); ++ unsigned long end = jiffies + HZ / 2; ++ bool resched = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write32(trans, HBUS_TARG_MEM_RADDR, +@@ -2194,14 +2195,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + HBUS_TARG_MEM_RDAT); + 
offs++; + +- /* calling ktime_get is expensive so +- * do it once in 128 reads +- */ +- if (offs % 128 == 0 && ktime_after(ktime_get(), +- timeout)) ++ if (time_after(jiffies, end)) { ++ resched = true; + break; ++ } + } + iwl_trans_release_nic_access(trans, &flags); ++ ++ if (resched) ++ cond_resched(); + } else { + return -EBUSY; + } +diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c +index f6a0454abe044..6f2172be7b66a 100644 +--- a/drivers/net/wireless/mediatek/mt7601u/dma.c ++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c +@@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) + + if (new_p) { + /* we have one extra ref from the allocator */ +- __free_pages(e->p, MT_RX_ORDER); +- ++ put_page(e->p); + e->p = new_p; + } + } +@@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + } + + e = &q->e[q->end]; +- e->skb = skb; + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, + mt7601u_complete_tx, q); + ret = usb_submit_urb(e->urb, GFP_ATOMIC); +@@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + + q->end = (q->end + 1) % q->entries; + q->used++; ++ e->skb = skb; + + if (q->used >= q->entries) + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index 3968f89f7855a..0ac0bd4c65c4c 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -233,7 +233,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, + } + + for (ns = nvme_next_ns(head, old); +- ns != old; ++ ns && ns != old; + ns = nvme_next_ns(head, ns)) { + if (nvme_path_is_disabled(ns)) + continue; +diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c +index be2520cc010be..7dc72cb718b0e 100644 +--- a/drivers/s390/crypto/vfio_ap_drv.c ++++ b/drivers/s390/crypto/vfio_ap_drv.c +@@ -71,15 +71,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev) + static void vfio_ap_queue_dev_remove(struct ap_device *apdev) + { + struct vfio_ap_queue *q; +- int apid, apqi; + + mutex_lock(&matrix_dev->lock); + q = dev_get_drvdata(&apdev->device); ++ vfio_ap_mdev_reset_queue(q, 1); + dev_set_drvdata(&apdev->device, NULL); +- apid = AP_QID_CARD(q->apqn); +- apqi = AP_QID_QUEUE(q->apqn); +- vfio_ap_mdev_reset_queue(apid, apqi, 1); +- vfio_ap_irq_disable(q); + kfree(q); + mutex_unlock(&matrix_dev->lock); + } +diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c +index 5c0f53c6dde75..790b0b2b36272 100644 +--- a/drivers/s390/crypto/vfio_ap_ops.c ++++ b/drivers/s390/crypto/vfio_ap_ops.c +@@ -25,6 +25,7 @@ + #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" + + static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev); ++static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); + + static int match_apqn(struct device *dev, const void *data) + { +@@ -49,20 +50,15 @@ static struct vfio_ap_queue *vfio_ap_get_queue( + int apqn) + { + struct vfio_ap_queue *q; +- struct device *dev; + + if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) + return NULL; + if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) + return NULL; + +- dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, +- &apqn, match_apqn); +- if (!dev) +- return NULL; +- q = dev_get_drvdata(dev); +- q->matrix_mdev = matrix_mdev; +- put_device(dev); ++ q = vfio_ap_find_queue(apqn); ++ if (q) 
++ q->matrix_mdev = matrix_mdev; + + return q; + } +@@ -119,13 +115,18 @@ static void vfio_ap_wait_for_irqclear(int apqn) + */ + static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) + { +- if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev) ++ if (!q) ++ return; ++ if (q->saved_isc != VFIO_AP_ISC_INVALID && ++ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { + kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); +- if (q->saved_pfn && q->matrix_mdev) ++ q->saved_isc = VFIO_AP_ISC_INVALID; ++ } ++ if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) { + vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), + &q->saved_pfn, 1); +- q->saved_pfn = 0; +- q->saved_isc = VFIO_AP_ISC_INVALID; ++ q->saved_pfn = 0; ++ } + } + + /** +@@ -144,7 +145,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) + * Returns if ap_aqic function failed with invalid, deconfigured or + * checkstopped AP. + */ +-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) ++static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) + { + struct ap_qirq_ctrl aqic_gisa = {}; + struct ap_queue_status status; +@@ -1114,48 +1115,70 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, + return NOTIFY_OK; + } + +-static void vfio_ap_irq_disable_apqn(int apqn) ++static struct vfio_ap_queue *vfio_ap_find_queue(int apqn) + { + struct device *dev; +- struct vfio_ap_queue *q; ++ struct vfio_ap_queue *q = NULL; + + dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, + &apqn, match_apqn); + if (dev) { + q = dev_get_drvdata(dev); +- vfio_ap_irq_disable(q); + put_device(dev); + } ++ ++ return q; + } + +-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, ++int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, + unsigned int retry) + { + struct ap_queue_status status; ++ int ret; + int retry2 = 2; +- int apqn = AP_MKQID(apid, apqi); + +- do { +- status = ap_zapq(apqn); +- switch (status.response_code) { +- case AP_RESPONSE_NORMAL: +- while (!status.queue_empty && retry2--) { +- msleep(20); +- status = ap_tapq(apqn, NULL); +- } +- WARN_ON_ONCE(retry2 <= 0); +- return 0; +- case AP_RESPONSE_RESET_IN_PROGRESS: +- case AP_RESPONSE_BUSY: ++ if (!q) ++ return 0; ++ ++retry_zapq: ++ status = ap_zapq(q->apqn); ++ switch (status.response_code) { ++ case AP_RESPONSE_NORMAL: ++ ret = 0; ++ break; ++ case AP_RESPONSE_RESET_IN_PROGRESS: ++ if (retry--) { + msleep(20); +- break; +- default: +- /* things are really broken, give up */ +- return -EIO; ++ goto retry_zapq; + } +- } while (retry--); ++ ret = -EBUSY; ++ break; ++ case AP_RESPONSE_Q_NOT_AVAIL: ++ case AP_RESPONSE_DECONFIGURED: ++ case AP_RESPONSE_CHECKSTOPPED: ++ WARN_ON_ONCE(status.irq_enabled); ++ ret = -EBUSY; ++ goto free_resources; ++ default: ++ /* things are really broken, give up */ ++ WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n", ++ status.response_code); ++ return -EIO; ++ } ++ ++ /* wait for the reset to take effect */ ++ while (retry2--) { ++ if (status.queue_empty && !status.irq_enabled) ++ break; ++ msleep(20); ++ status = ap_tapq(q->apqn, NULL); ++ } ++ WARN_ON_ONCE(retry2 <= 0); + +- return -EBUSY; ++free_resources: ++ vfio_ap_free_aqic_resources(q); ++ ++ return ret; + } + + static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) +@@ -1163,13 +1186,15 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) + int ret; + int rc = 0; + unsigned long apid, apqi; ++ struct vfio_ap_queue *q; + struct ap_matrix_mdev *matrix_mdev = 
mdev_get_drvdata(mdev); + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, + matrix_mdev->matrix.apm_max + 1) { + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, + matrix_mdev->matrix.aqm_max + 1) { +- ret = vfio_ap_mdev_reset_queue(apid, apqi, 1); ++ q = vfio_ap_find_queue(AP_MKQID(apid, apqi)); ++ ret = vfio_ap_mdev_reset_queue(q, 1); + /* + * Regardless whether a queue turns out to be busy, or + * is not operational, we need to continue resetting +@@ -1177,7 +1202,6 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) + */ + if (ret) + rc = ret; +- vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi)); + } + } + +diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h +index f46dde56b4644..28e9d99897682 100644 +--- a/drivers/s390/crypto/vfio_ap_private.h ++++ b/drivers/s390/crypto/vfio_ap_private.h +@@ -88,11 +88,6 @@ struct ap_matrix_mdev { + struct mdev_device *mdev; + }; + +-extern int vfio_ap_mdev_register(void); +-extern void vfio_ap_mdev_unregister(void); +-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, +- unsigned int retry); +- + struct vfio_ap_queue { + struct ap_matrix_mdev *matrix_mdev; + unsigned long saved_pfn; +@@ -100,5 +95,10 @@ struct vfio_ap_queue { + #define VFIO_AP_ISC_INVALID 0xff + unsigned char saved_isc; + }; +-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q); ++ ++int vfio_ap_mdev_register(void); ++void vfio_ap_mdev_unregister(void); ++int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, ++ unsigned int retry); ++ + #endif /* _VFIO_AP_PRIVATE_H_ */ +diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c +index 096a83cf0caf3..4b4174597150d 100644 +--- a/drivers/soc/atmel/soc.c ++++ b/drivers/soc/atmel/soc.c +@@ -264,8 +264,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs) + return soc_dev; + } + ++static const struct of_device_id at91_soc_allowed_list[] __initconst = { ++ { .compatible = "atmel,at91rm9200", }, ++ { .compatible = "atmel,at91sam9", }, ++ { .compatible = "atmel,sama5", }, ++ { .compatible = "atmel,samv7", }, ++ { } ++}; ++ + static int __init atmel_soc_device_init(void) + { ++ struct device_node *np = of_find_node_by_path("/"); ++ ++ if (!of_match_node(at91_soc_allowed_list, np)) ++ return 0; ++ + at91_soc_init(socs); + + return 0; +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c +index aadedec3bfe7b..ea79482ebda46 100644 +--- a/drivers/tee/optee/call.c ++++ b/drivers/tee/optee/call.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) + */ + optee_cq_wait_for_completion(&optee->call_queue, &w); + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { +- might_sleep(); ++ if (need_resched()) ++ cond_resched(); + param.a0 = res.a0; + param.a1 = res.a1; + param.a2 = res.a2; +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c +index 14ccf13ab8fa1..786494bb7f20b 100644 +--- a/drivers/xen/xenbus/xenbus_probe.c ++++ b/drivers/xen/xenbus/xenbus_probe.c +@@ -714,6 +714,23 @@ static bool xs_hvm_defer_init_for_callback(void) + #endif + } + ++static int xenbus_probe_thread(void *unused) ++{ ++ DEFINE_WAIT(w); ++ ++ /* ++ * We actually just want to wait for *any* trigger of xb_waitq, ++ * and run xenbus_probe() the moment it occurs. 
++ */ ++ prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE); ++ schedule(); ++ finish_wait(&xb_waitq, &w); ++ ++ DPRINTK("probing"); ++ xenbus_probe(); ++ return 0; ++} ++ + static int __init xenbus_probe_initcall(void) + { + /* +@@ -725,6 +742,20 @@ static int __init xenbus_probe_initcall(void) + !xs_hvm_defer_init_for_callback())) + xenbus_probe(); + ++ /* ++ * For XS_LOCAL, spawn a thread which will wait for xenstored ++ * or a xenstore-stubdom to be started, then probe. It will be ++ * triggered when communication starts happening, by waiting ++ * on xb_waitq. ++ */ ++ if (xen_store_domain_type == XS_LOCAL) { ++ struct task_struct *probe_task; ++ ++ probe_task = kthread_run(xenbus_probe_thread, NULL, ++ "xenbus_probe"); ++ if (IS_ERR(probe_task)) ++ return PTR_ERR(probe_task); ++ } + return 0; + } + device_initcall(xenbus_probe_initcall); +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 59e7a2ad440fc..a32f23981f60f 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -640,7 +640,15 @@ static noinline void caching_thread(struct btrfs_work *work) + mutex_lock(&caching_ctl->mutex); + down_read(&fs_info->commit_root_sem); + +- if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) ++ /* ++ * If we are in the transaction that populated the free space tree we ++ * can't actually cache from the free space tree as our commit root and ++ * real root are the same, so we could change the contents of the blocks ++ * while caching. Instead do the slow caching in this case, and after ++ * the transaction has committed we will be safe. ++ */ ++ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && ++ !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) + ret = load_free_space_tree(caching_ctl); + else + ret = load_extent_tree_free(caching_ctl); +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 27128164fac97..cda5534d3d0e3 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -136,6 +136,9 @@ enum { + BTRFS_FS_STATE_DEV_REPLACING, + /* The btrfs_fs_info created for self-tests */ + BTRFS_FS_STATE_DUMMY_FS_INFO, ++ ++ /* Indicate that we can't trust the free space tree for caching yet */ ++ BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, + }; + + #define BTRFS_BACKREF_REV_MAX 256 +diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c +index 48a03f5240f59..dfabbbfc94ccb 100644 +--- a/fs/btrfs/free-space-tree.c ++++ b/fs/btrfs/free-space-tree.c +@@ -1149,6 +1149,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) + return PTR_ERR(trans); + + set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ++ set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); + free_space_root = btrfs_create_tree(trans, + BTRFS_FREE_SPACE_TREE_OBJECTID); + if (IS_ERR(free_space_root)) { +@@ -1170,11 +1171,18 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); + btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); + clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ++ ret = btrfs_commit_transaction(trans); + +- return btrfs_commit_transaction(trans); ++ /* ++ * Now that we've committed the transaction any reading of our commit ++ * root will be safe, so we can cache from the free space tree now. 
++ */ ++ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); ++ return ret; + + abort: + clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ++ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + return ret; +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 4232f956bdac0..ca1d98f274d12 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -2388,6 +2388,7 @@ out_forget: + spin_unlock(&ino->i_lock); + lseg->pls_layout = lo; + NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); ++ pnfs_free_lseg_list(&free_me); + return ERR_PTR(-EAGAIN); + } + +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 6b559d25a84ee..88ac8edf44e31 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -556,6 +556,8 @@ struct intel_iommu { + struct iommu_device iommu; /* IOMMU core code handle */ + int node; + u32 flags; /* Software defined flags */ ++ ++ struct dmar_drhd_unit *drhd; + }; + + /* PCI domain-device relationship */ +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 377179283c46c..4b38ba101b9b7 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -2030,7 +2030,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb); + void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced); + extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, + u32 reo_wnd); +-extern void tcp_rack_mark_lost(struct sock *sk); ++extern bool tcp_rack_mark_lost(struct sock *sk); + extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, + u64 xmit_time); + extern void tcp_rack_reo_timeout(struct sock *sk); +diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h +index 2622b5a3e6163..9a31ea2ad1cfc 100644 +--- a/include/uapi/linux/icmpv6.h ++++ b/include/uapi/linux/icmpv6.h +@@ -137,6 +137,7 @@ struct icmp6hdr { + #define ICMPV6_HDR_FIELD 0 + #define ICMPV6_UNK_NEXTHDR 1 + #define ICMPV6_UNK_OPTION 2 ++#define ICMPV6_HDR_INCOMP 3 + + /* + * constants for (set|get)sockopt +diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c +index 15d70a90b50dc..d65b0fc8fb48b 100644 +--- a/kernel/kexec_core.c ++++ b/kernel/kexec_core.c +@@ -1129,7 +1129,6 @@ int kernel_kexec(void) + + #ifdef CONFIG_KEXEC_JUMP + if (kexec_image->preserve_context) { +- lock_system_sleep(); + pm_prepare_console(); + error = freeze_processes(); + if (error) { +@@ -1192,7 +1191,6 @@ int kernel_kexec(void) + thaw_processes(); + Restore_console: + pm_restore_console(); +- unlock_system_sleep(); + } + #endif + +diff --git a/kernel/power/swap.c b/kernel/power/swap.c +index ca0fcb5ced714..0516c422206d8 100644 +--- a/kernel/power/swap.c ++++ b/kernel/power/swap.c +@@ -489,10 +489,10 @@ static int swap_writer_finish(struct swap_map_handle *handle, + unsigned int flags, int error) + { + if (!error) { +- flush_swap_writer(handle); + pr_info("S"); + error = mark_swapfiles(handle, flags); + pr_cont("|\n"); ++ flush_swap_writer(handle); + } + + if (error) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 7411a43134629..26305aa88651f 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -2764,7 +2764,8 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag) + } else if (tcp_is_rack(sk)) { + u32 prior_retrans = tp->retrans_out; + +- tcp_rack_mark_lost(sk); ++ if (tcp_rack_mark_lost(sk)) ++ *ack_flag &= ~FLAG_SET_XMIT_TIMER; + if (prior_retrans > tp->retrans_out) + *ack_flag |= FLAG_LOST_RETRANS; + } +@@ 
-3713,9 +3714,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + + if (tp->tlp_high_seq) + tcp_process_tlp_ack(sk, ack, flag); +- /* If needed, reset TLP/RTO timer; RACK may later override this. */ +- if (flag & FLAG_SET_XMIT_TIMER) +- tcp_set_xmit_timer(sk); + + if (tcp_ack_is_dubious(sk, flag)) { + if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) { +@@ -3728,6 +3726,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + &rexmit); + } + ++ /* If needed, reset TLP/RTO timer when RACK doesn't set. */ ++ if (flag & FLAG_SET_XMIT_TIMER) ++ tcp_set_xmit_timer(sk); ++ + if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) + sk_dst_confirm(sk); + +diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c +index fdb715bdd2d11..8757bb6cb1d93 100644 +--- a/net/ipv4/tcp_recovery.c ++++ b/net/ipv4/tcp_recovery.c +@@ -110,13 +110,13 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout) + } + } + +-void tcp_rack_mark_lost(struct sock *sk) ++bool tcp_rack_mark_lost(struct sock *sk) + { + struct tcp_sock *tp = tcp_sk(sk); + u32 timeout; + + if (!tp->rack.advanced) +- return; ++ return false; + + /* Reset the advanced flag to avoid unnecessary queue scanning */ + tp->rack.advanced = 0; +@@ -126,6 +126,7 @@ void tcp_rack_mark_lost(struct sock *sk) + inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT, + timeout, inet_csk(sk)->icsk_rto); + } ++ return !!timeout; + } + + /* Record the most recently (re)sent time among the (s)acked packets +diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c +index 7d3a3894f785c..e9bb89131e02a 100644 +--- a/net/ipv6/icmp.c ++++ b/net/ipv6/icmp.c +@@ -158,7 +158,13 @@ static bool is_ineligible(const struct sk_buff *skb) + tp = skb_header_pointer(skb, + ptr+offsetof(struct icmp6hdr, icmp6_type), + sizeof(_type), &_type); +- if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) ++ ++ /* Based on RFC 8200, Section 4.5 Fragment Header, return ++ * false if this is a fragment packet with no icmp header info. ++ */ ++ if (!tp && frag_off != 0) ++ return false; ++ else if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) + return true; + } + return false; +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c +index 1f5d4d196dcce..c8cf1bbad74a2 100644 +--- a/net/ipv6/reassembly.c ++++ b/net/ipv6/reassembly.c +@@ -42,6 +42,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -322,7 +324,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + struct frag_queue *fq; + const struct ipv6hdr *hdr = ipv6_hdr(skb); + struct net *net = dev_net(skb_dst(skb)->dev); +- int iif; ++ __be16 frag_off; ++ int iif, offset; ++ u8 nexthdr; + + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) + goto fail_hdr; +@@ -351,6 +355,33 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + return 1; + } + ++ /* RFC 8200, Section 4.5 Fragment Header: ++ * If the first fragment does not include all headers through an ++ * Upper-Layer header, then that fragment should be discarded and ++ * an ICMP Parameter Problem, Code 3, message should be sent to ++ * the source of the fragment, with the Pointer field set to zero. 
++ */
++ nexthdr = hdr->nexthdr;
++ offset = ipv6_skip_exthdr(skb, skb_transport_offset(skb), &nexthdr, &frag_off);
++ if (offset >= 0) {
++ /* Check some common protocols' header */
++ if (nexthdr == IPPROTO_TCP)
++ offset += sizeof(struct tcphdr);
++ else if (nexthdr == IPPROTO_UDP)
++ offset += sizeof(struct udphdr);
++ else if (nexthdr == IPPROTO_ICMPV6)
++ offset += sizeof(struct icmp6hdr);
++ else
++ offset += 1;
++
++ if (!(frag_off & htons(IP6_OFFSET)) && offset > skb->len) {
++ __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
++ IPSTATS_MIB_INHDRERRORS);
++ icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
++ return -1;
++ }
++ }
++
+ iif = skb->dev ? skb->dev->ifindex : 0;
+ fq = fq_find(net, fhdr->identification, hdr, iif);
+ if (fq) {
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 05406e9c05b32..268f1d8f440ba 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1061,6 +1061,7 @@ enum queue_stop_reason {
+ IEEE80211_QUEUE_STOP_REASON_FLUSH,
+ IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
+ IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
+
+ IEEE80211_QUEUE_STOP_REASONS,
+ };
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index af8b09214786d..6089b09ec13b6 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1537,6 +1537,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+ if (ret)
+ return ret;
+
++ ieee80211_stop_vif_queues(local, sdata,
++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
++ synchronize_net();
++
+ ieee80211_do_stop(sdata, false);
+
+ ieee80211_teardown_sdata(sdata);
+@@ -1557,6 +1561,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+ err = ieee80211_do_open(&sdata->wdev, false);
+ WARN(err, "type change: do_open returned %d", err);
+
++ ieee80211_wake_vif_queues(local, sdata,
++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
+ return ret;
+ }
+
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 60236cc316d03..95415d2b81c93 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -233,8 +233,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR,
+ priv->expr->ops->size);
+ if (set->flags & NFT_SET_TIMEOUT) {
+- if (timeout || set->timeout)
++ if (timeout || set->timeout) {
++ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
+ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
++ }
+ }
+
+ priv->timeout = timeout;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 4170acc2dc282..99b06a16b8086 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -860,6 +860,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
+
+ if (!dev->polling) {
+ device_unlock(&dev->dev);
++ nfc_put_device(dev);
+ return -EINVAL;
+ }
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index b5c867fe32324..23d5e56306a4c 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -105,7 +105,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
+ if (addr->target_idx > dev->target_next_idx - 1 ||
+ addr->target_idx < dev->target_next_idx - dev->n_targets) {
+ rc = -EINVAL;
+- goto error;
++ goto put_dev;
+ }
+
+ rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 032ed76c0166d..55fb3744552de 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -207,6 +207,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
+ tail = b->peer_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_peer *peer = b->peer_backlog[tail];
++ rxrpc_put_local(peer->local);
+ kfree(peer);
+ tail = (tail + 1) & (size - 1);
+ }
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index 69102fda9ebd4..76a80a41615be 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -896,8 +896,9 @@ out:
+ int call_commit_handler(struct net_device *dev)
+ {
+ #ifdef CONFIG_WIRELESS_EXT
+- if ((netif_running(dev)) &&
+- (dev->wireless_handlers->standard[0] != NULL))
++ if (netif_running(dev) &&
++ dev->wireless_handlers &&
++ dev->wireless_handlers->standard[0])
+ /* Call the commit handler on the driver */
+ return dev->wireless_handlers->standard[0](dev, NULL,
+ NULL, NULL);
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 7a84745477919..e120df0a6da13 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -656,7 +656,7 @@ resume:
+ /* only the first xfrm gets the encap type */
+ encap_type = 0;
+
+- if (async && x->repl->recheck(x, skb, seq)) {
++ if (x->repl->recheck(x, skb, seq)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+ goto drop_unlock;
+ }
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 2917711ff8ab6..32c8163427970 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -790,15 +790,22 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a,
+ const xfrm_address_t *b,
+ u8 prefixlen, u16 family)
+ {
++ u32 ma, mb, mask;
+ unsigned int pdw, pbi;
+ int delta = 0;
+
+ switch (family) {
+ case AF_INET:
+- if (sizeof(long) == 4 && prefixlen == 0)
+- return ntohl(a->a4) - ntohl(b->a4);
+- return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
+- (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
++ if (prefixlen == 0)
++ return 0;
++ mask = ~0U << (32 - prefixlen);
++ ma = ntohl(a->a4) & mask;
++ mb = ntohl(b->a4) & mask;
++ if (ma < mb)
++ delta = -1;
++ else if (ma > mb)
++ delta = 1;
++ break;
+ case AF_INET6:
+ pdw = prefixlen >> 5;
+ pbi = prefixlen & 0x1f;
+@@ -809,10 +816,13 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a,
+ return delta;
+ }
+ if (pbi) {
+- u32 mask = ~0u << (32 - pbi);
+-
+- delta = (ntohl(a->a6[pdw]) & mask) -
+- (ntohl(b->a6[pdw]) & mask);
++ mask = ~0U << (32 - pbi);
++ ma = ntohl(a->a6[pdw]) & mask;
++ mb = ntohl(b->a6[pdw]) & mask;
++ if (ma < mb)
++ delta = -1;
++ else if (ma > mb)
++ delta = 1;
+ }
+ break;
+ default:
+@@ -3065,8 +3075,8 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ xflo.flags = flags;
+
+ /* To accelerate a bit... */
+- if ((dst_orig->flags & DST_NOXFRM) ||
+- !net->xfrm.policy_count[XFRM_POLICY_OUT])
++ if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
++ !net->xfrm.policy_count[XFRM_POLICY_OUT]))
+ goto nopol;
+
+ xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8adbe45a54c11..f548bd48bf729 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7907,6 +7907,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
++ SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 834367dd54e1b..a5c1a2c4eae4e 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -1043,7 +1043,7 @@ static const struct hda_fixup via_fixups[] = {
+ static const struct snd_pci_quirk vt2002p_fixups[] = {
+ SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
+ SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
+- SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
++ SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
+ {}
+ };
+
+diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
+index aa5833001fde5..2cb719893324a 100644
+--- a/sound/soc/intel/skylake/skl-topology.c
++++ b/sound/soc/intel/skylake/skl-topology.c
+@@ -3619,15 +3619,16 @@ static void skl_tplg_complete(struct snd_soc_component *component)
+
+ list_for_each_entry(dobj, &component->dobj_list, list) {
+ struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
+- struct soc_enum *se =
+- (struct soc_enum *)kcontrol->private_value;
+- char **texts = dobj->control.dtexts;
++ struct soc_enum *se;
++ char **texts;
+ char chan_text[4];
+
+- if (dobj->type != SND_SOC_DOBJ_ENUM ||
+- dobj->control.kcontrol->put !=
+- skl_tplg_multi_config_set_dmic)
++ if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
++ kcontrol->put != skl_tplg_multi_config_set_dmic)
+ continue;
++
++ se = (struct soc_enum *)kcontrol->private_value;
++ texts = dobj->control.dtexts;
+ sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
+
+ for (i = 0; i < se->items; i++) {
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 0100f123484e6..c367609433bfc 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -982,7 +982,7 @@ static int soc_tplg_denum_create_values(struct soc_enum *se,
+ return -EINVAL;
+
+ se->dobj.control.dvalues = kzalloc(le32_to_cpu(ec->items) *
+- sizeof(u32),
++ sizeof(*se->dobj.control.dvalues),
+ GFP_KERNEL);
+ if (!se->dobj.control.dvalues)
+ return -ENOMEM;
+diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
+index cf3d26c233e8e..7fcc42bc076fa 100755
+--- a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
++++ b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
+@@ -197,7 +197,7 @@ multipath4_test()
+ t0_rp12=$(link_stats_tx_packets_get $rp12)
+ t0_rp13=$(link_stats_tx_packets_get $rp13)
+
+- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
++ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ t1_rp12=$(link_stats_tx_packets_get $rp12)
+diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh
+index 79a2099279621..464821c587a5e 100755
+--- a/tools/testing/selftests/net/forwarding/router_multipath.sh
++++ b/tools/testing/selftests/net/forwarding/router_multipath.sh
+@@ -178,7 +178,7 @@ multipath4_test()
+ t0_rp12=$(link_stats_tx_packets_get $rp12)
+ t0_rp13=$(link_stats_tx_packets_get $rp13)
+
+- ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
++ ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ t1_rp12=$(link_stats_tx_packets_get $rp12)
+diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
+index 7a1bf94c5bd38..bdf450eaf60cf 100755
+--- a/tools/testing/selftests/net/xfrm_policy.sh
++++ b/tools/testing/selftests/net/xfrm_policy.sh
+@@ -202,7 +202,7 @@ check_xfrm() {
+ # 1: iptables -m policy rule count != 0
+ rval=$1
+ ip=$2
+- lret=0
++ local lret=0
+
+ ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null
+
+@@ -287,6 +287,47 @@ check_hthresh_repeat()
+ return 0
+ }
+
++# insert non-overlapping policies in a random order and check that
++# all of them can be fetched using the traffic selectors.
++check_random_order()
++{
++ local ns=$1
++ local log=$2
++
++ for i in $(seq 100); do
++ ip -net $ns xfrm policy flush
++ for j in $(seq 0 16 255 | sort -R); do
++ ip -net $ns xfrm policy add dst $j.0.0.0/24 dir out priority 10 action allow
++ done
++ for j in $(seq 0 16 255); do
++ if ! ip -net $ns xfrm policy get dst $j.0.0.0/24 dir out > /dev/null; then
++ echo "FAIL: $log" 1>&2
++ return 1
++ fi
++ done
++ done
++
++ for i in $(seq 100); do
++ ip -net $ns xfrm policy flush
++ for j in $(seq 0 16 255 | sort -R); do
++ local addr=$(printf "e000:0000:%02x00::/56" $j)
++ ip -net $ns xfrm policy add dst $addr dir out priority 10 action allow
++ done
++ for j in $(seq 0 16 255); do
++ local addr=$(printf "e000:0000:%02x00::/56" $j)
++ if ! ip -net $ns xfrm policy get dst $addr dir out > /dev/null; then
++ echo "FAIL: $log" 1>&2
++ return 1
++ fi
++ done
++ done
++
++ ip -net $ns xfrm policy flush
++
++ echo "PASS: $log"
++ return 0
++}
++
+ #check for needed privileges
+ if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+@@ -438,6 +479,8 @@ check_exceptions "exceptions and block policies after htresh change to normal"
+
+ check_hthresh_repeat "policies with repeated htresh change"
+
++check_random_order ns3 "policies inserted in random order"
++
+ for i in 1 2 3 4;do ip netns del ns$i;done
+
+ exit $ret
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 8f3b40ec02b77..f25b5043cbcae 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1017,6 +1017,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ /* We can read the guest memory with __xxx_user() later on. */
+ if ((id < KVM_USER_MEM_SLOTS) &&
+ ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
++ (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
+ !access_ok((void __user *)(unsigned long)mem->userspace_addr,
+ mem->memory_size)))
+ goto out;
diff --git a/patch/kernel/odroidxu4-current/patch-5.4.95-96.patch b/patch/kernel/odroidxu4-current/patch-5.4.95-96.patch
new file mode 100644
index 000000000..5e7c4fa59
--- /dev/null
+++ b/patch/kernel/odroidxu4-current/patch-5.4.95-96.patch
@@ -0,0 +1,1218 @@
+diff --git a/Makefile b/Makefile
+index aa3c2e834442e..7a47a2594f957 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 95
++SUBLEVEL = 96
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index 51d867cf146c1..6c295a231882a 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -247,11 +247,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
+
+
+ /*
+- * The linear kernel range starts at the bottom of the virtual address
+- * space. Testing the top bit for the start of the region is a
+- * sufficient check and avoids having to worry about the tag.
++ * Check whether an arbitrary address is within the linear map, which
++ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
++ * kernel's TTBR1 address range.
+ */
+-#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
++#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
+
+ #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+ #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
+@@ -332,7 +332,7 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
+
+ #define virt_addr_valid(addr) ({ \
+- __typeof__(addr) __addr = addr; \
++ __typeof__(addr) __addr = __tag_reset(addr); \
+ __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+ })
+
+diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
+index 67a9ba9eaa96b..cde44c13dda1b 100644
+--- a/arch/arm64/mm/physaddr.c
++++ b/arch/arm64/mm/physaddr.c
+@@ -9,7 +9,7 @@
+
+ phys_addr_t __virt_to_phys(unsigned long x)
+ {
+- WARN(!__is_lm_address(x),
++ WARN(!__is_lm_address(__tag_reset(x)),
+ "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ (void *)x,
+ (void *)x);
+diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
+index 86f20d520a079..b40d0295d8129 100644
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -88,7 +88,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
+ * think of extending them - you will be slapped with a stinking trout or a frozen
+ * shark will reach you, wherever you are! You've been warned.
+ */
+-static inline unsigned long long notrace __rdmsr(unsigned int msr)
++static __always_inline unsigned long long __rdmsr(unsigned int msr)
+ {
+ DECLARE_ARGS(val, low, high);
+
+@@ -100,7 +100,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
+ return EAX_EDX_VAL(val, low, high);
+ }
+
+-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
++static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
+ {
+ asm volatile("1: wrmsr\n"
+ "2:\n"
+diff --git a/block/blk-core.c b/block/blk-core.c
+index d2213220099d3..5808baa950c35 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -886,11 +886,14 @@ generic_make_request_checks(struct bio *bio)
+ }
+
+ /*
+- * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+- * if queue is not a request based queue.
++ * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
++ * with BLK_STS_AGAIN status in order to catch -EAGAIN and
++ * to give a chance to the caller to repeat request gracefully.
+ */
+- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
+- goto not_supported;
++ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
++ status = BLK_STS_AGAIN;
++ goto end_io;
++ }
+
+ if (should_fail_bio(bio))
+ goto end_io;
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index d831a61e0010e..383c7029d3cee 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -174,6 +174,8 @@ struct acpi_thermal {
+ int tz_enabled;
+ int kelvin_offset;
+ struct work_struct thermal_check_work;
++ struct mutex thermal_check_lock;
++ refcount_t thermal_check_count;
+ };
+
+ /* --------------------------------------------------------------------------
+@@ -494,17 +496,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
+ return 0;
+ }
+
+-static void acpi_thermal_check(void *data)
+-{
+- struct acpi_thermal *tz = data;
+-
+- if (!tz->tz_enabled)
+- return;
+-
+- thermal_zone_device_update(tz->thermal_zone,
+- THERMAL_EVENT_UNSPECIFIED);
+-}
+-
+ /* sys I/F for generic thermal sysfs support */
+
+ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
+@@ -538,6 +529,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal,
+ return 0;
+ }
+
++static void acpi_thermal_check_fn(struct work_struct *work);
++
+ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+ {
+@@ -563,7 +556,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "%s kernel ACPI thermal control\n",
+ tz->tz_enabled ? "Enable" : "Disable"));
+- acpi_thermal_check(tz);
++ acpi_thermal_check_fn(&tz->thermal_check_work);
+ }
+ return 0;
+ }
+@@ -932,6 +925,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
++static void acpi_queue_thermal_check(struct acpi_thermal *tz)
++{
++ if (!work_pending(&tz->thermal_check_work))
++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++}
++
+ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+ {
+ struct acpi_thermal *tz = acpi_driver_data(device);
+@@ -942,17 +941,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+
+ switch (event) {
+ case ACPI_THERMAL_NOTIFY_TEMPERATURE:
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ break;
+ case ACPI_THERMAL_NOTIFY_THRESHOLDS:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ case ACPI_THERMAL_NOTIFY_DEVICES:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+@@ -1052,7 +1051,27 @@ static void acpi_thermal_check_fn(struct work_struct *work)
+ {
+ struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+ thermal_check_work);
+- acpi_thermal_check(tz);
++
++ if (!tz->tz_enabled)
++ return;
++ /*
++ * In general, it is not sufficient to check the pending bit, because
++ * subsequent instances of this function may be queued after one of them
++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
++ * one of them is running, though, because it may have done the actual
++ * check some time ago, so allow at least one of them to block on the
++ * mutex while another one is running the update.
++ */
++ if (!refcount_dec_not_one(&tz->thermal_check_count))
++ return;
++
++ mutex_lock(&tz->thermal_check_lock);
++
++ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
++
++ refcount_inc(&tz->thermal_check_count);
++
++ mutex_unlock(&tz->thermal_check_lock);
+ }
+
+ static int acpi_thermal_add(struct acpi_device *device)
+@@ -1084,6 +1103,8 @@ static int acpi_thermal_add(struct acpi_device *device)
+ if (result)
+ goto free_memory;
+
++ refcount_set(&tz->thermal_check_count, 3);
++ mutex_init(&tz->thermal_check_lock);
+ INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
+ pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+@@ -1149,7 +1170,7 @@ static int acpi_thermal_resume(struct device *dev)
+ tz->state.active |= tz->trips.active[i].flags.enabled;
+ }
+
+- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++ acpi_queue_thermal_check(tz);
+
+ return AE_OK;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 959eb075d11ed..c18f39271b034 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1914,6 +1914,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
+ initial_link_setting;
+ uint32_t link_bw;
+
++ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
++ return false;
++
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index bb7add5ea2273..a6d5beada6634 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -257,7 +257,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
+ .num_banks = 8,
+ .num_chans = 4,
+ .vmm_page_size_bytes = 4096,
+- .dram_clock_change_latency_us = 23.84,
++ .dram_clock_change_latency_us = 11.72,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3600,
+ .xfc_bus_transport_time_us = 4,
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index b16aea0e39992..6dd29bad1609f 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -421,15 +421,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+- if (!priv->master_mii_bus)
++ if (!priv->master_mii_bus) {
++ of_node_put(dn);
+ return -EPROBE_DEFER;
++ }
+
+ get_device(&priv->master_mii_bus->dev);
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+- if (!priv->slave_mii_bus)
++ if (!priv->slave_mii_bus) {
++ of_node_put(dn);
+ return -ENOMEM;
++ }
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 9040340fad198..c3079f436f6d7 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4752,6 +4752,12 @@ static void ibmvnic_tasklet(void *data)
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
++ /* This barrier makes sure ibmvnic_next_crq()'s
++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
++ * before ibmvnic_handle_crq()'s
++ * switch(gen_crq->first) and switch(gen_crq->cmd).
++ */
++ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 7a964271959d8..c2cabd77884bf 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1295,8 +1295,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+ }
+
+ length = (io.nblocks + 1) << ns->lba_shift;
+- meta_len = (io.nblocks + 1) * ns->ms;
+- metadata = nvme_to_user_ptr(io.metadata);
++
++ if ((io.control & NVME_RW_PRINFO_PRACT) &&
++ ns->ms == sizeof(struct t10_pi_tuple)) {
++ /*
++ * Protection information is stripped/inserted by the
++ * controller.
++ */
++ if (nvme_to_user_ptr(io.metadata))
++ return -EINVAL;
++ meta_len = 0;
++ metadata = NULL;
++ } else {
++ meta_len = (io.nblocks + 1) * ns->ms;
++ metadata = nvme_to_user_ptr(io.metadata);
++ }
+
+ if (ns->ext) {
+ length += meta_len;
+diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
+index 5baf64dfb24de..1bebad36bf2e5 100644
+--- a/drivers/phy/motorola/phy-cpcap-usb.c
++++ b/drivers/phy/motorola/phy-cpcap-usb.c
+@@ -625,35 +625,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
+ generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
+ if (IS_ERR(generic_phy)) {
+ error = PTR_ERR(generic_phy);
+- return PTR_ERR(generic_phy);
++ goto out_reg_disable;
+ }
+
+ phy_set_drvdata(generic_phy, ddata);
+
+ phy_provider = devm_of_phy_provider_register(ddata->dev,
+ of_phy_simple_xlate);
+- if (IS_ERR(phy_provider))
+- return PTR_ERR(phy_provider);
++ if (IS_ERR(phy_provider)) {
++ error = PTR_ERR(phy_provider);
++ goto out_reg_disable;
++ }
+
+ error = cpcap_usb_init_optional_pins(ddata);
+ if (error)
+- return error;
++ goto out_reg_disable;
+
+ cpcap_usb_init_optional_gpios(ddata);
+
+ error = cpcap_usb_init_iio(ddata);
+ if (error)
+- return error;
++ goto out_reg_disable;
+
+ error = cpcap_usb_init_interrupts(pdev, ddata);
+ if (error)
+- return error;
++ goto out_reg_disable;
+
+ usb_add_phy_dev(&ddata->phy);
+ atomic_set(&ddata->active, 1);
+ schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
+
+ return 0;
++
++out_reg_disable:
++ regulator_disable(ddata->vusb);
++
++ return error;
+ }
+
+ static int cpcap_usb_phy_remove(struct platform_device *pdev)
+diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
+index 37035dca469cf..d4fc2cbf78703 100644
+--- a/drivers/platform/x86/intel-vbtn.c
++++ b/drivers/platform/x86/intel-vbtn.c
+@@ -203,6 +203,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ },
+ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
++ },
++ },
+ {} /* Array terminator */
+ };
+
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 1e072dbba30d6..7ed1189a7200c 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -231,6 +231,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
+ .properties = digma_citi_e200_props,
+ };
+
++static const struct property_entry estar_beauty_hd_props[] = {
++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++ { }
++};
++
++static const struct ts_dmi_data estar_beauty_hd_data = {
++ .acpi_name = "GDIX1001:00",
++ .properties = estar_beauty_hd_props,
++};
++
+ static const struct property_entry gp_electronic_t701_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+@@ -747,6 +757,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
++ {
++ /* Estar Beauty HD (MID 7316R) */
++ .driver_data = (void *)&estar_beauty_hd_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
++ },
++ },
+ {
+ /* GP-electronic T701 */
+ .driver_data = (void *)&gp_electronic_t701_data,
+diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
+index 522636e946282..c8bf8c7ada6a7 100644
+--- a/drivers/scsi/fnic/vnic_dev.c
++++ b/drivers/scsi/fnic/vnic_dev.c
+@@ -444,7 +444,8 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+ fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ pr_err("error in devcmd2 init");
+- return -ENODEV;
++ err = -ENODEV;
++ goto err_free_wq;
+ }
+
+ /*
+@@ -460,7 +461,7 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+- goto err_free_wq;
++ goto err_disable_wq;
+
+ vdev->devcmd2->result =
+ (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
+@@ -481,8 +482,9 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+
+ err_free_desc_ring:
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+-err_free_wq:
++err_disable_wq:
+ vnic_wq_disable(&vdev->devcmd2->wq);
++err_free_wq:
+ vnic_wq_free(&vdev->devcmd2->wq);
+ err_free_devcmd2:
+ kfree(vdev->devcmd2);
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 8a76284b59b08..523809a8a2323 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -2881,8 +2881,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- if (sdev->type == TYPE_DISK)
++ if (sdev->type == TYPE_DISK) {
+ sdev->allow_restart = 1;
++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
++ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+ }
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 52e8666598531..e5b18e5d46dac 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1619,8 +1619,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already,"
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+ }
+
+ /*
+@@ -1639,6 +1644,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
+
++skip_resp:
+ fc_exch_release(ep);
+ return;
+ rel:
+@@ -1895,10 +1901,16 @@ static void fc_exch_reset(struct fc_exch *ep)
+
+ fc_exch_hold(ep);
+
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already,"
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
++skip_resp:
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
+ }
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index d4d1104fac991..8cd0a87764dfd 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
+ res = mutex_lock_interruptible(&rport->mutex);
+ if (res)
+ goto out;
+- scsi_target_block(&shost->shost_gendev);
++ if (rport->state != SRP_RPORT_FAIL_FAST)
++ /*
++ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
++ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
++ * later is ok though, scsi_internal_device_unblock_nowait()
++ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
++ */
++ scsi_target_block(&shost->shost_gendev);
+ res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
+ pr_debug("%s (state %d): transport.reconnect() returned %d\n",
+ dev_name(&shost->shost_gendev), rport->state, res);
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 86e280edf8040..7f644a58db511 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -347,33 +347,10 @@ static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
+ return -ENOMEM;
+
+ ref->root_id = root_id;
+- if (key) {
++ if (key)
+ ref->key_for_search = *key;
+- /*
+- * We can often find data backrefs with an offset that is too
+- * large (>= LLONG_MAX, maximum allowed file offset) due to
+- * underflows when subtracting a file's offset with the data
+- * offset of its corresponding extent data item. This can
+- * happen for example in the clone ioctl.
+- * So if we detect such case we set the search key's offset to
+- * zero to make sure we will find the matching file extent item
+- * at add_all_parents(), otherwise we will miss it because the
+- * offset taken form the backref is much larger then the offset
+- * of the file extent item. This can make us scan a very large
+- * number of file extent items, but at least it will not make
+- * us miss any.
+- * This is an ugly workaround for a behaviour that should have
+- * never existed, but it does and a fix for the clone ioctl
+- * would touch a lot of places, cause backwards incompatibility
+- * and would not fix the problem for extents cloned with older
+- * kernels.
+- */
+- if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
+- ref->key_for_search.offset >= LLONG_MAX)
+- ref->key_for_search.offset = 0;
+- } else {
++ else
+ memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
+- }
+
+ ref->inode_list = NULL;
+ ref->level = level;
+@@ -409,10 +386,36 @@ static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
+ wanted_disk_byte, count, sc, gfp_mask);
+ }
+
++static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
++{
++ struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
++ struct rb_node *parent = NULL;
++ struct prelim_ref *ref = NULL;
++ struct prelim_ref target = {0};
++ int result;
++
++ target.parent = bytenr;
++
++ while (*p) {
++ parent = *p;
++ ref = rb_entry(parent, struct prelim_ref, rbnode);
++ result = prelim_ref_compare(ref, &target);
++
++ if (result < 0)
++ p = &(*p)->rb_left;
++ else if (result > 0)
++ p = &(*p)->rb_right;
++ else
++ return 1;
++ }
++ return 0;
++}
++
+ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+- struct ulist *parents, struct prelim_ref *ref,
++ struct ulist *parents,
++ struct preftrees *preftrees, struct prelim_ref *ref,
+ int level, u64 time_seq, const u64 *extent_item_pos,
+- u64 total_refs, bool ignore_offset)
++ bool ignore_offset)
+ {
+ int ret = 0;
+ int slot;
+@@ -424,6 +427,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ u64 disk_byte;
+ u64 wanted_disk_byte = ref->wanted_disk_byte;
+ u64 count = 0;
++ u64 data_offset;
+
+ if (level != 0) {
+ eb = path->nodes[level];
+@@ -434,18 +438,26 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ }
+
+ /*
+- * We normally enter this function with the path already pointing to
+- * the first item to check. But sometimes, we may enter it with
+- * slot==nritems. In that case, go to the next leaf before we continue.
++ * 1. We normally enter this function with the path already pointing to
++ * the first item to check. But sometimes, we may enter it with
++ * slot == nritems.
++ * 2. We are searching for normal backref but bytenr of this leaf
++ * matches shared data backref
++ * 3. The leaf owner is not equal to the root we are searching
++ *
++ * For these cases, go to the next leaf before we continue.
+ */
+- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
++ eb = path->nodes[0];
++ if (path->slots[0] >= btrfs_header_nritems(eb) ||
++ is_shared_data_backref(preftrees, eb->start) ||
++ ref->root_id != btrfs_header_owner(eb)) {
+ if (time_seq == SEQ_LAST)
+ ret = btrfs_next_leaf(root, path);
+ else
+ ret = btrfs_next_old_leaf(root, path, time_seq);
+ }
+
+- while (!ret && count < total_refs) {
++ while (!ret && count < ref->count) {
+ eb = path->nodes[0];
+ slot = path->slots[0];
+
+@@ -455,13 +467,31 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ key.type != BTRFS_EXTENT_DATA_KEY)
+ break;
+
++ /*
++ * We are searching for normal backref but bytenr of this leaf
++ * matches shared data backref, OR
++ * the leaf owner is not equal to the root we are searching for
++ */
++ if (slot == 0 &&
++ (is_shared_data_backref(preftrees, eb->start) ||
++ ref->root_id != btrfs_header_owner(eb))) {
++ if (time_seq == SEQ_LAST)
++ ret = btrfs_next_leaf(root, path);
++ else
++ ret = btrfs_next_old_leaf(root, path, time_seq);
++ continue;
++ }
+ fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
++ data_offset = btrfs_file_extent_offset(eb, fi);
+
+ if (disk_byte == wanted_disk_byte) {
+ eie = NULL;
+ old = NULL;
+- count++;
++ if (ref->key_for_search.offset == key.offset - data_offset)
++ count++;
++ else
++ goto next;
+ if (extent_item_pos) {
+ ret = check_extent_in_eb(&key, eb, fi,
+ *extent_item_pos,
+@@ -502,9 +532,9 @@ next:
+ */
+ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 time_seq,
++ struct preftrees *preftrees,
+ struct prelim_ref *ref, struct ulist *parents,
+- const u64 *extent_item_pos, u64 total_refs,
+- bool ignore_offset)
++ const u64 *extent_item_pos, bool ignore_offset)
+ {
+ struct btrfs_root *root;
+ struct btrfs_key root_key;
+@@ -513,6 +543,7 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ int root_level;
+ int level = ref->level;
+ int index;
++ struct btrfs_key search_key = ref->key_for_search;
+
+ root_key.objectid = ref->root_id;
+ root_key.type = BTRFS_ROOT_ITEM_KEY;
+@@ -545,13 +576,33 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ goto out;
+ }
+
++ /*
++ * We can often find data backrefs with an offset that is too large
++ * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
++ * subtracting a file's offset with the data offset of its
++ * corresponding extent data item. This can happen for example in the
++ * clone ioctl.
++ *
++ * So if we detect such case we set the search key's offset to zero to
++ * make sure we will find the matching file extent item at
++ * add_all_parents(), otherwise we will miss it because the offset
++ * taken form the backref is much larger then the offset of the file
++ * extent item. This can make us scan a very large number of file
++ * extent items, but at least it will not make us miss any.
++ *
++ * This is an ugly workaround for a behaviour that should have never
++ * existed, but it does and a fix for the clone ioctl would touch a lot
++ * of places, cause backwards incompatibility and would not fix the
++ * problem for extents cloned with older kernels.
++ */
++ if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
++ search_key.offset >= LLONG_MAX)
++ search_key.offset = 0;
+ path->lowest_level = level;
+ if (time_seq == SEQ_LAST)
+- ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
+- 0, 0);
++ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+ else
+- ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
+- time_seq);
++ ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
+
+ /* root node has been locked, we can release @subvol_srcu safely here */
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+@@ -574,8 +625,8 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ eb = path->nodes[level];
+ }
+
+- ret = add_all_parents(root, path, parents, ref, level, time_seq,
+- extent_item_pos, total_refs, ignore_offset);
++ ret = add_all_parents(root, path, parents, preftrees, ref, level,
++ time_seq, extent_item_pos, ignore_offset);
+ out:
+ path->lowest_level = 0;
+ btrfs_release_path(path);
+@@ -609,7 +660,7 @@ unode_aux_to_inode_list(struct ulist_node *node)
+ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 time_seq,
+ struct preftrees *preftrees,
+- const u64 *extent_item_pos, u64 total_refs,
++ const u64 *extent_item_pos,
+ struct share_check *sc, bool ignore_offset)
+ {
+ int err;
+@@ -653,9 +704,9 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+ ret = BACKREF_FOUND_SHARED;
+ goto out;
+ }
+- err = resolve_indirect_ref(fs_info, path, time_seq, ref,
+- parents, extent_item_pos,
+- total_refs, ignore_offset);
++ err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
++ ref, parents, extent_item_pos,
++ ignore_offset);
+ /*
+ * we can only tolerate ENOENT,otherwise,we should catch error
+ * and return directly.
+@@ -758,8 +809,7 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
+ */
+ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head, u64 seq,
+- struct preftrees *preftrees, u64 *total_refs,
+- struct share_check *sc)
++ struct preftrees *preftrees, struct share_check *sc)
+ {
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+@@ -793,7 +843,6 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ default:
+ BUG();
+ }
+- *total_refs += count;
+ switch (node->type) {
+ case BTRFS_TREE_BLOCK_REF_KEY: {
+ /* NORMAL INDIRECT METADATA backref */
+@@ -876,7 +925,7 @@ out:
+ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 bytenr,
+ int *info_level, struct preftrees *preftrees,
+- u64 *total_refs, struct share_check *sc)
++ struct share_check *sc)
+ {
+ int ret = 0;
+ int slot;
+@@ -900,7 +949,6 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
+
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+ flags = btrfs_extent_flags(leaf, ei);
+- *total_refs += btrfs_extent_refs(leaf, ei);
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+ ptr = (unsigned long)(ei + 1);
+@@ -1125,8 +1173,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
+ struct prelim_ref *ref;
+ struct rb_node *node;
+ struct extent_inode_elem *eie = NULL;
+- /* total of both direct AND indirect refs! */
+- u64 total_refs = 0;
+ struct preftrees preftrees = {
+ .direct = PREFTREE_INIT,
+ .indirect = PREFTREE_INIT,
+@@ -1195,7 +1241,7 @@ again:
+ }
+ spin_unlock(&delayed_refs->lock);
+ ret = add_delayed_refs(fs_info, head, time_seq,
+- &preftrees, &total_refs, sc);
++ &preftrees, sc);
+ mutex_unlock(&head->mutex);
+ if (ret)
+ goto out;
+@@ -1216,8 +1262,7 @@ again:
+ (key.type == BTRFS_EXTENT_ITEM_KEY ||
+ key.type == BTRFS_METADATA_ITEM_KEY)) {
+ ret = add_inline_refs(fs_info, path, bytenr,
+- &info_level, &preftrees,
+- &total_refs, sc);
++ &info_level, &preftrees, sc);
+ if (ret)
+ goto out;
+ ret = add_keyed_refs(fs_info, path, bytenr, info_level,
+@@ -1236,7 +1281,7 @@ again:
+ WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
+
+ ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
+- extent_item_pos, total_refs, sc, ignore_offset);
++ extent_item_pos, sc, ignore_offset);
+ if (ret)
+ goto out;
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 4aba4878ed967..8bb001c7927f0 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
+ struct buffer_head *bh = NULL;
+ int nsr = 0;
+ struct udf_sb_info *sbi;
++ loff_t session_offset;
+
+ sbi = UDF_SB(sb);
+ if (sb->s_blocksize < sizeof(struct volStructDesc))
+@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
+ else
+ sectorsize = sb->s_blocksize;
+
+- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
++ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
++ sector += session_offset;
+
+ udf_debug("Starting at sector %u (%lu byte sectors)\n",
+ (unsigned int)(sector >> sb->s_blocksize_bits),
+@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)
+
+ if (nsr > 0)
+ return 1;
+- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
+- VSD_FIRST_SECTOR_OFFSET)
++ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
+ return -1;
+ else
+ return 0;
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index 0f9da966934e2..c7108ce5a051c 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -31,6 +31,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ unsigned int cpu,
+ const char *namefmt);
+
++void kthread_set_per_cpu(struct task_struct *k, int cpu);
++bool kthread_is_per_cpu(struct task_struct *k);
++
+ /**
+ * kthread_run - create and wake a thread.
+ * @threadfn: the function to run until signal_pending(current).
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 4b38ba101b9b7..37b51456784f8 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -619,6 +619,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
+
+ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+ unsigned int tcp_current_mss(struct sock *sk);
++u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
+
+ /* Bound MSS / TSO packet size with the half of the window */
+ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index e51f0006057df..1d4c98a19043f 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -469,11 +469,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ return p;
+ kthread_bind(p, cpu);
+ /* CPU hotplug need to bind once again when unparking the thread. */
+- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+ to_kthread(p)->cpu = cpu;
+ return p;
+ }
+
++void kthread_set_per_cpu(struct task_struct *k, int cpu)
++{
++ struct kthread *kthread = to_kthread(k);
++ if (!kthread)
++ return;
++
++ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
++
++ if (cpu < 0) {
++ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
++ return;
++ }
++
++ kthread->cpu = cpu;
++ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
++}
++
++bool kthread_is_per_cpu(struct task_struct *k)
++{
++ struct kthread *kthread = to_kthread(k);
++ if (!kthread)
++ return false;
++
++ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
++}
++
+ /**
+ * kthread_unpark - unpark a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index 2efe1e206167c..f25208e8df836 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
+ kfree(td);
+ return PTR_ERR(tsk);
+ }
++ kthread_set_per_cpu(tsk, cpu);
+ /*
+ * Park the thread so that it could start right on the CPU
+ * when it is available.
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 28e52657e0930..29c36c0290623 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1847,12 +1847,6 @@ static void worker_attach_to_pool(struct worker *worker,
+ {
+ mutex_lock(&wq_pool_attach_mutex);
+
+- /*
+- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
+- * online CPUs. It'll be re-applied when any of the CPUs come up.
+- */
+- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+-
+ /*
+ * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
+ * stable across this function. See the comments above the flag
+@@ -1861,6 +1855,9 @@ static void worker_attach_to_pool(struct worker *worker,
+ if (pool->flags & POOL_DISASSOCIATED)
+ worker->flags |= WORKER_UNBOUND;
+
++ if (worker->rescue_wq)
++ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
++
+ list_add_tail(&worker->node, &pool->workers);
+ worker->pool = pool;
+
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index bfe7bdd4c3406..98c396769be94 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
+ u64 rate, brate;
+
+ est_fetch_counters(est, &b);
+- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
+- brate -= (est->avbps >> est->ewma_log);
++ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
++ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
+
+- rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
+- rate -= (est->avpps >> est->ewma_log);
++ rate = (u64)(b.packets - est->last_packets) << (10 - est->intvl_log);
++ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
+
+ write_seqcount_begin(&est->seq);
+ est->avbps += brate;
+@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ if (parm->interval < -2 || parm->interval > 3)
+ return -EINVAL;
+
++ if (parm->ewma_log == 0 || parm->ewma_log >= 31)
++ return -EINVAL;
++
+ est = kzalloc(sizeof(*est), GFP_KERNEL);
+ if (!est)
+ return -ENOBUFS;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 26305aa88651f..a1768ded2d545 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3295,6 +3295,7 @@ static void tcp_ack_probe(struct sock *sk)
+ } else {
+ unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
+
++ when = tcp_clamp_probe0_to_user_timeout(sk, when);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ when, TCP_RTO_MAX, NULL);
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 5da6ffce390c2..d0774b4e934d6 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3850,6 +3850,8 @@ void tcp_send_probe0(struct sock *sk)
+ */
+ timeout = TCP_RESOURCE_PROBE_INTERVAL;
+ }
++
++ timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
+ }
+
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 7fcd116fbd378..fa2ae96ecdc40 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -40,6 +40,24 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
+ return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
+ }
+
++u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++ u32 remaining;
++ s32 elapsed;
++
++ if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
++ return when;
++
++ elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
++ if (unlikely(elapsed < 0))
++ elapsed = 0;
++ remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
++ remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
++
++ return min_t(u32, remaining, when);
++}
++
+ /**
+ * tcp_write_err() - close socket and save error info
+ * @sk: The socket the error has appeared on.
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 3ab85e1e38d82..1a15e7bae106a 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4080,6 +4080,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
+
+ rcu_read_lock();
+ key = rcu_dereference(sta->ptk[sta->ptk_idx]);
++ if (!key)
++ key = rcu_dereference(sdata->default_unicast_key);
+ if (key) {
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index 3a1d428c13369..ea9ddea35a886 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -461,10 +461,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
+ extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+
+ if (check_cb(dev)) {
+- /* This flag is only checked if the return value is success. */
+- port_obj_info->handled = true;
+- return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+- extack);
++ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
++ extack);
++ if (err != -EOPNOTSUPP)
++ port_obj_info->handled = true;
++ return err;
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+@@ -513,9 +514,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
+ int err = -EOPNOTSUPP;
+
+ if (check_cb(dev)) {
+- /* This flag is only checked if the return value is success. */
+- port_obj_info->handled = true;
+- return del_cb(dev, port_obj_info->obj);
++ err = del_cb(dev, port_obj_info->obj);
++ if (err != -EOPNOTSUPP)
++ port_obj_info->handled = true;
++ return err;
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+@@ -563,9 +565,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
+ int err = -EOPNOTSUPP;
+
+ if (check_cb(dev)) {
+- port_attr_info->handled = true;
+- return set_cb(dev, port_attr_info->attr,
+- port_attr_info->trans);
++ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
++ if (err != -EOPNOTSUPP)
++ port_attr_info->handled = true;
++ return err;
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 5f515a29668c8..b3667a5efdc1f 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2450,6 +2450,9 @@ static const struct pci_device_id azx_ids[] = {
+ /* CometLake-S */
+ { PCI_DEVICE(0x8086, 0xa3f0),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++ /* CometLake-R */
++ { PCI_DEVICE(0x8086, 0xf0c8),
++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Icelake */
+ { PCI_DEVICE(0x8086, 0x34c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index 9e8233c10d860..df38616c431a6 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -68,8 +68,7 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev)
+ * has been recorded in STATESTS
+ */
+ if (codec->jacktbl.used)
+- schedule_delayed_work(&codec->jackpoll_work,
+- codec->jackpoll_interval);
++ pm_request_resume(&codec->core.dev);
+ }
+ #else
+ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index edba4745f25a9..693d740107a8b 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -214,8 +214,11 @@ static int read_symbols(struct elf *elf)
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (!symtab) {
+- WARN("missing symbol table");
+- return -1;
++ /*
++ * A missing symbol table is actually possible if it's an empty
++ * .o file. This can happen for thunk_64.o.
++ */
++ return 0;
+ }
+
+ symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+index 0453c50c949cb..0725239bbd85c 100644
+--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+@@ -380,7 +380,6 @@ int test_alignment_handler_integer(void)
+ LOAD_DFORM_TEST(ldu);
+ LOAD_XFORM_TEST(ldx);
+ LOAD_XFORM_TEST(ldux);
+- LOAD_DFORM_TEST(lmw);
+ STORE_DFORM_TEST(stb);
+ STORE_XFORM_TEST(stbx);
+ STORE_DFORM_TEST(stbu);
+@@ -399,7 +398,11 @@ int test_alignment_handler_integer(void)
+ STORE_XFORM_TEST(stdx);
+ STORE_DFORM_TEST(stdu);
+ STORE_XFORM_TEST(stdux);
++
++#ifdef __BIG_ENDIAN__
++ LOAD_DFORM_TEST(lmw);
+ STORE_DFORM_TEST(stmw);
++#endif
+
+ return rc;
+ }
diff --git a/patch/kernel/odroidxu4-current/patch-5.4.96-97.patch b/patch/kernel/odroidxu4-current/patch-5.4.96-97.patch
new file mode 100644
index 000000000..eefa48f02
--- /dev/null
+++ b/patch/kernel/odroidxu4-current/patch-5.4.96-97.patch
@@ -0,0 +1,2250 @@
+diff --git a/Makefile b/Makefile
+index 7a47a2594f957..032751f6be0c1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 96
++SUBLEVEL = 97
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+@@ -920,12 +920,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+ # change __FILE__ to the relative path from the srctree
+ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
+-# ensure -fcf-protection is disabled when using retpoline as it is
+-# incompatible with -mindirect-branch=thunk-extern
+-ifdef CONFIG_RETPOLINE
+-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+-endif
+-
+ include scripts/Makefile.kasan
+ include scripts/Makefile.extrawarn
+ include scripts/Makefile.ubsan
+diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+index 01ccff756996d..5740f9442705c 100644
+--- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts
++++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+@@ -110,7 +110,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_rgmii_pins>;
+ phy-handle = <&phy1>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-supply = <&reg_gmac_3v3>;
+ status = "okay";
+ };
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 8b81a17f675d9..e17ec92b90dd8 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -66,15 +66,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("ldrb %0, [%1, %2]"
++ asm volatile("ldrb %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 2:
+- asm("ldrh %0, [%1, %2]"
++ asm volatile("ldrh %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 4:
+- asm("ldr %0, [%1, %2]"
++ asm volatile("ldr %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ }
+@@ -100,17 +100,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("strb %0, [%1, %2]"
++ asm volatile("strb %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 2:
+- asm("strh %0, [%1, %2]"
++ asm volatile("strh %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 4:
+- asm("str %0, [%1, %2]"
++ asm volatile("str %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 354ef2f3eac67..9533c85fb0a30 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -2382,7 +2382,7 @@
+ interrupts = ;
+ dr_mode = "host";
+ snps,dis_u2_susphy_quirk;
+- snps,quirk-frame-length-adjustment;
++ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,parkmode-disable-ss-quirk;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+index d4c1da3d4bde2..04d4b1b11a00a 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+@@ -304,7 +304,7 @@
+
+ dcfg: dcfg@1ee0000 {
+ compatible = "fsl,ls1046a-dcfg", "syscon";
+- reg = <0x0 0x1ee0000 0x0 0x10000>;
++ reg = <0x0 0x1ee0000 0x0 0x1000>;
+ big-endian;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index f539b3655f6b9..e638f216dbfb3 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -243,6 +243,8 @@
+ &i2c3 {
+ status = "okay";
+ clock-frequency = <400000>;
++ /* Overwrite pinctrl-0 from sdm845.dtsi */
++ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
+
+ tsel: hid@15 {
+ compatible = "hid-over-i2c";
+@@ -250,9 +252,6 @@
+ hid-descr-addr = <0x1>;
+
+ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
+-
+- pinctrl-names = "default";
+- pinctrl-0 = <&i2c3_hid_active>;
+ };
+
+ tsc2: hid@2c {
+@@ -261,11 +260,6 @@
+ hid-descr-addr = <0x20>;
+
+ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
+-
+- pinctrl-names = "default";
+- pinctrl-0 = <&i2c3_hid_active>;
+-
+- status = "disabled";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
+index 9e09909a510a1..98b014a8f9165 100644
+--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
+@@ -860,7 +860,7 @@
+ vopl_mmu: iommu@ff470f00 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff470f00 0x0 0x100>;
+- interrupts = ;
++ interrupts = ;
+ interrupt-names = "vopl_mmu";
+ clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
+ clock-names = "aclk", "hclk";
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 179b41ad63baf..18618af3835f9 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -959,6 +959,7 @@ static void virtio_uml_release_dev(struct device *d)
+ }
+
+ os_close_file(vu_dev->sock);
++ kfree(vu_dev);
+ }
+
+ /* Platform device */
+@@ -977,7 +978,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ if (!pdata)
+ return -EINVAL;
+
+- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
++ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
+ if (!vu_dev)
+ return -ENOMEM;
+
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 94df0868804bc..b5e3bfd4facea 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -131,6 +131,9 @@ else
+
+ KBUILD_CFLAGS += -mno-red-zone
+ KBUILD_CFLAGS += -mcmodel=kernel
++
++ # Intel CET isn't enabled in the kernel
++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+ endif
+
+ ifdef CONFIG_X86_X32
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 6016559ed1713..5bef1575708dc 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; }
+ #endif /* !CONFIG_X86_LOCAL_APIC */
+
+ #ifdef CONFIG_X86_X2APIC
+-/*
+- * Make previous memory operations globally visible before
+- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+- * mfence for this.
+- */
+-static inline void x2apic_wrmsr_fence(void)
+-{
+- asm volatile("mfence" : : : "memory");
+-}
+-
+ static inline void native_apic_msr_write(u32 reg, u32 v)
+ {
+ if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 7f828fe497978..4819d5e5a3353 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -84,4 +84,22 @@ do { \
+
+ #include
+
++/*
++ * Make previous memory operations globally visible before
++ * a WRMSR.
++ *
++ * MFENCE makes writes visible, but only affects load/store
++ * instructions. WRMSR is unfortunately not a load/store
++ * instruction and is unaffected by MFENCE. The LFENCE ensures
++ * that the WRMSR is not reordered.
++ *
++ * Most WRMSRs are full serializing instructions themselves and
++ * do not require this barrier. This is only required for the
++ * IA32_TSC_DEADLINE and X2APIC MSRs.
++ */
++static inline void weak_wrmsr_fence(void)
++{
++ asm volatile("mfence; lfence" : : : "memory");
++}
++
+ #endif /* _ASM_X86_BARRIER_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 06fa808d72032..3dca7b8642e9c 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -42,6 +42,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -472,6 +473,9 @@ static int lapic_next_deadline(unsigned long delta,
+ {
+ u64 tsc;
+
++ /* This MSR is special and need a special fence: */
++ weak_wrmsr_fence();
++
+ tsc = rdtsc();
+ wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+ return 0;
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index b0889c48a2ac5..7eec3c154fa24 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
+ }
+
+@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long flags;
+ u32 dest;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ local_irq_save(flags);
+
+ tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index e14eae6d6ea71..032a00e5d9fa6 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
+ }
+
+@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long this_cpu;
+ unsigned long flags;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+
+ local_irq_save(flags);
+
+@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which)
+ {
+ unsigned long cfg = __prepare_ICR(which, vector, 0);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ native_x2apic_icr_write(cfg, 0);
+ }
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 39265b55929d2..60c8dcb907a50 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2890,6 +2890,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+ (u32)msr_data;
++ if (efer & EFER_LMA)
++ ctxt->mode = X86EMUL_MODE_PROT64;
+
+ return X86EMUL_CONTINUE;
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2b506904be024..4906e480b5bb6 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -889,6 +889,11 @@ static int has_svm(void)
+ return 0;
+ }
+
++ if (sev_active()) {
++ pr_info("KVM is unsupported when running as an SEV guest\n");
++ return 0;
++ }
++
+ return 1;
+ }
+
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 9268c12458c84..dfa01bcdc3694 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -375,6 +375,7 @@ bool force_dma_unencrypted(struct device *dev)
+
+ return false;
+ }
++EXPORT_SYMBOL_GPL(sev_active);
+
+ /* Architecture __weak replacement functions */
+ void __init mem_encrypt_free_decrypted_mem(void)
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d2dd387c95d86..de06ee7d2ad46 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1434,8 +1434,6 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+
+ drm_connector_update_edid_property(connector,
+ aconnector->edid);
+- drm_add_edid_modes(connector, aconnector->edid);
+-
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 8c73377ac82ca..3d004ca76b6ed 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -215,9 +215,17 @@ static const struct xpad_device {
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+@@ -296,6 +304,9 @@ static const struct xpad_device {
+ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ { }
+ };
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index eca931da76c3a..b7dbcbac3a1a5 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
++ },
++ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 984c7a6ea4fe8..953d86ca6d2b2 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3285,6 +3285,12 @@ static int __init init_dmars(void)
+
+ if (!ecap_pass_through(iommu->ecap))
+ hw_pass_through = 0;
++
++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
++ pr_info("Disable batched IOTLB flush due to virtualization");
++ intel_iommu_strict = 1;
++ }
++
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_supported(iommu))
+ intel_svm_init(iommu);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index ec5dfb7ae4e16..cc38530804c90 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -538,8 +538,10 @@ static void md_submit_flush_data(struct work_struct *ws)
+ * could wait for this and below md_handle_request could wait for those
+ * bios because of suspend check
+ */
++ spin_lock_irq(&mddev->lock);
+ mddev->last_flush = mddev->start_flush;
+ mddev->flush_bio = NULL;
++ spin_unlock_irq(&mddev->lock);
+ wake_up(&mddev->sb_wait);
+
+ if (bio->bi_iter.bi_size == 0) {
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index 3efaa9534a777..9a5aaac29099b 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -20,6 +20,8 @@
+ #include "sdio_cis.h"
+ #include "sdio_ops.h"
+
++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ ++ + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, + const unsigned char *buf, unsigned size) + { +@@ -266,6 +268,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + + do { + unsigned char tpl_code, tpl_link; ++ unsigned long timeout = jiffies + ++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); + + ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); + if (ret) +@@ -318,6 +322,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + prev = &this->next; + + if (ret == -ENOENT) { ++ if (time_after(jiffies, timeout)) ++ break; + /* warn about unknown tuples */ + pr_warn_ratelimited("%s: queuing unknown" + " CIS tuple 0x%02x (%u bytes)\n", +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index 469b155df4885..1af09fd3fed1c 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -1517,7 +1517,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, + if (!entry.portvec) + entry.state = 0; + } else { +- entry.portvec |= BIT(port); ++ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) ++ entry.portvec = BIT(port); ++ else ++ entry.portvec |= BIT(port); ++ + entry.state = state; + } + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index c20dc689698ed..5acd599d6b9af 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; +- +- /* Always report link is down if the VF queues aren't enabled */ +- if (!vf->queues_enabled) { +- pfe.event_data.link_event.link_status = false; +- pfe.event_data.link_event.link_speed = 0; +- } else if (vf->link_forced) { ++ if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
VIRTCHNL_LINK_SPEED_40GB : 0); +@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) + pfe.event_data.link_event.link_speed = + i40e_virtchnl_link_speed(ls->link_speed); + } +- + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); + } +@@ -2393,8 +2387,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) + } + } + +- vf->queues_enabled = true; +- + error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, +@@ -2416,9 +2408,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + +- /* Immediately mark queues as disabled */ +- vf->queues_enabled = false; +- + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +index 7164b9bb294ff..f65cc0c165502 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +@@ -99,7 +99,6 @@ struct i40e_vf { + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + bool link_forced; + bool link_up; /* only valid if VF link is forced */ +- bool queues_enabled; /* true if the VF queues are enabled */ + bool spoofchk; + u16 num_mac; + u16 num_vlan; +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index 0303eeb760505..0365bf2b480e3 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -1709,7 +1709,8 @@ static int igc_get_link_ksettings(struct net_device *netdev, + Asym_Pause); + } + +- status = rd32(IGC_STATUS); ++ status = pm_runtime_suspended(&adapter->pdev->dev) ? ++ 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c +index c25f555aaf822..ed5d09c11c389 100644 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c +@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) + { + struct igc_nvm_info *nvm = &hw->nvm; ++ s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; +- s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); +- ret_val = -IGC_ERR_NVM; + goto out; + } + +diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c +index 5eeb4c8caf4ae..08adf103e90b4 100644 +--- a/drivers/net/ethernet/intel/igc/igc_mac.c ++++ b/drivers/net/ethernet/intel/igc/igc_mac.c +@@ -647,7 +647,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) + } + + out: +- return 0; ++ return ret_val; + } + + /** +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +index a30eb90ba3d28..dd590086fe6a5 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) + /* Clear entry invalidation bit */ + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + +- /* Write tcam index - indirect access */ +- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); +- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) +- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); +- + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); + ++ /* Write tcam index - indirect access */ ++ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); ++ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) ++ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 4944c40436f08..11e12761b0a6e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1697,6 +1697,7 @@ search_again_locked: + if (!fte_tmp) + continue; + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); ++ /* No error check needed here, because insert_fte() is not called */ + up_write_ref_node(&fte_tmp->node, false); + tree_put_node(&fte_tmp->node, false); + kmem_cache_free(steering->ftes_cache, fte); +@@ -1745,6 +1746,8 @@ skip_search: + up_write_ref_node(&g->node, false); + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); + up_write_ref_node(&fte->node, false); ++ if (IS_ERR(rule)) ++ tree_put_node(&fte->node, false); + return rule; + } + rule = ERR_PTR(-ENOENT); +@@ -1844,6 +1847,8 @@ search_again_locked: + up_write_ref_node(&g->node, false); + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); + up_write_ref_node(&fte->node, false); ++ if (IS_ERR(rule)) ++ tree_put_node(&fte->node, false); + tree_put_node(&g->node, false); + return rule; + +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index 366ca1b5da5cc..1e8244ec5b332 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -6419,10 +6419,10 @@ static int rtl8169_close(struct net_device *dev) + + cancel_work_sync(&tp->wk.work); + +- phy_disconnect(tp->phydev); +- + free_irq(pci_irq_vector(pdev, 0), tp); + ++ phy_disconnect(tp->phydev); ++ + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, 
tp->TxDescArray, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +index c54fe6650018e..7272d8522a9e9 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +@@ -134,7 +134,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm) + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + +- /* Do not configure default queue, it is configured via context info */ ++ /* ++ * The default queue is configured via context info, so if we ++ * have a single queue, there's nothing to do here. ++ */ ++ if (mvm->trans->num_rx_queues == 1) ++ return 0; ++ ++ /* skip the default queue */ + num_queues = mvm->trans->num_rx_queues - 1; + + size = struct_size(cmd, data, num_queues); +diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c +index 196aa44c4936a..e0f411021c59d 100644 +--- a/drivers/nvdimm/dimm_devs.c ++++ b/drivers/nvdimm/dimm_devs.c +@@ -344,16 +344,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, + } + static DEVICE_ATTR_RO(state); + +-static ssize_t available_slots_show(struct device *dev, +- struct device_attribute *attr, char *buf) ++static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) + { +- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); ++ struct device *dev; + ssize_t rc; + u32 nfree; + + if (!ndd) + return -ENXIO; + ++ dev = ndd->dev; + nvdimm_bus_lock(dev); + nfree = nd_label_nfree(ndd); + if (nfree - 1 > nfree) { +@@ -365,6 +365,18 @@ static ssize_t available_slots_show(struct device *dev, + nvdimm_bus_unlock(dev); + return rc; + } ++ ++static ssize_t available_slots_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ ssize_t rc; ++ ++ nd_device_lock(dev); ++ rc = __available_slots_show(dev_get_drvdata(dev), buf); ++ nd_device_unlock(dev); ++ ++ return rc; ++} + static DEVICE_ATTR_RO(available_slots); + + __weak ssize_t security_show(struct device *dev, +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index ef93bd3ed339c..434d3f21f0e13 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3161,6 +3161,8 @@ static const struct pci_device_id nvme_id_table[] = { + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, ++ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ ++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), + .driver_data = NVME_QUIRK_SINGLE_VECTOR }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index e31823f19a0fa..9242224156f5b 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -292,7 +292,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) + length = cmd->pdu_len; + cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); + offset = cmd->rbytes_done; +- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); ++ cmd->sg_idx = offset / PAGE_SIZE; + sg_offset = offset % PAGE_SIZE; + sg = &cmd->req.sg[cmd->sg_idx]; + +@@ -305,6 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) + length -= iov_len; + sg = sg_next(sg); + iov++; ++ sg_offset = 0; + } + + iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 134dc2005ce97..c9f6e97582885 100644 +--- a/drivers/usb/class/usblp.c ++++ 
b/drivers/usb/class/usblp.c +@@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) + if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) + return -EINVAL; + +- alts = usblp->protocol[protocol].alt_setting; +- if (alts < 0) +- return -EINVAL; +- r = usb_set_interface(usblp->dev, usblp->ifnum, alts); +- if (r < 0) { +- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", +- alts, usblp->ifnum); +- return r; ++ /* Don't unnecessarily set the interface if there's a single alt. */ ++ if (usblp->intf->num_altsetting > 1) { ++ alts = usblp->protocol[protocol].alt_setting; ++ if (alts < 0) ++ return -EINVAL; ++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts); ++ if (r < 0) { ++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", ++ alts, usblp->ifnum); ++ return r; ++ } + } + + usblp->bidir = (usblp->protocol[protocol].epread != NULL); +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index 70ac47a341ac2..e3f1f20c49221 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, + static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + u32 windex) + { +- struct dwc2_hsotg_ep *ep; + int dir = (windex & USB_DIR_IN) ? 1 : 0; + int idx = windex & 0x7F; + +@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + if (idx > hsotg->num_of_eps) + return NULL; + +- ep = index_to_ep(hsotg, idx, dir); +- +- if (idx && ep->dir_in != dir) +- return NULL; +- +- return ep; ++ return index_to_ep(hsotg, idx, dir); + } + + /** +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 440dbf55ddf70..90ec65d31059f 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1718,7 +1718,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) + if (PMSG_IS_AUTO(msg)) + break; + +- ret = dwc3_core_init(dwc); ++ ret = dwc3_core_init_for_resume(dwc); + if (ret) + return ret; + +diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c +index 30313b233680d..99c7fc0d1d597 100644 +--- a/drivers/usb/gadget/legacy/ether.c ++++ b/drivers/usb/gadget/legacy/ether.c +@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail1; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c +index 45c54d56ecbd5..b45e5bf089979 100644 +--- a/drivers/usb/host/xhci-mtk-sch.c ++++ b/drivers/usb/host/xhci-mtk-sch.c +@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev, + + sch_ep->sch_tt = tt; + sch_ep->ep = ep; ++ INIT_LIST_HEAD(&sch_ep->endpoint); ++ INIT_LIST_HEAD(&sch_ep->tt_endpoint); + + return sch_ep; + } +@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, + sch_ep->bw_budget_table[j]; + } + } ++ sch_ep->allocated = used; + } + + static int check_sch_tt(struct usb_device *udev, +@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev, + return 0; + } + ++static void destroy_sch_ep(struct usb_device *udev, ++ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) ++{ ++ /* only release ep bw check passed by check_sch_bw() */ 
++ if (sch_ep->allocated) ++ update_bus_bw(sch_bw, sch_ep, 0); ++ ++ list_del(&sch_ep->endpoint); ++ ++ if (sch_ep->sch_tt) { ++ list_del(&sch_ep->tt_endpoint); ++ drop_tt(udev); ++ } ++ kfree(sch_ep); ++} ++ + static bool need_bw_sch(struct usb_host_endpoint *ep, + enum usb_device_speed speed, int has_tt) + { +@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk) + + mtk->sch_array = sch_array; + ++ INIT_LIST_HEAD(&mtk->bw_ep_chk_list); ++ + return 0; + } + EXPORT_SYMBOL_GPL(xhci_mtk_sch_init); +@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + struct xhci_virt_device *virt_dev; +- struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep; +- struct mu3h_sch_bw_info *sch_array; + unsigned int ep_index; +- int bw_index; +- int ret = 0; + + xhci = hcd_to_xhci(hcd); + virt_dev = xhci->devs[udev->slot_id]; + ep_index = xhci_get_endpoint_index(&ep->desc); + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); +- sch_array = mtk->sch_array; + + xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", + __func__, usb_endpoint_type(&ep->desc), udev->speed, +@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + return 0; + } + +- bw_index = get_bw_index(xhci, udev, ep); +- sch_bw = &sch_array[bw_index]; +- + sch_ep = create_sch_ep(udev, ep, ep_ctx); + if (IS_ERR_OR_NULL(sch_ep)) + return -ENOMEM; + + setup_sch_info(udev, ep_ctx, sch_ep); + +- ret = check_sch_bw(udev, sch_bw, sch_ep); +- if (ret) { +- xhci_err(xhci, "Not enough bandwidth!\n"); +- if (is_fs_or_ls(udev->speed)) +- drop_tt(udev); +- +- kfree(sch_ep); +- return -ENOSPC; +- } +- +- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); +- +- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) +- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode)); +- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) +- | EP_BREPEAT(sch_ep->repeat)); +- +- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", +- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, +- sch_ep->offset, sch_ep->repeat); ++ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list); + + return 0; + } +@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct xhci_virt_device *virt_dev; + struct mu3h_sch_bw_info *sch_array; + struct mu3h_sch_bw_info *sch_bw; +- struct mu3h_sch_ep_info *sch_ep; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index; + + xhci = hcd_to_xhci(hcd); +@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + bw_index = get_bw_index(xhci, udev, ep); + sch_bw = &sch_array[bw_index]; + +- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { ++ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) { + if (sch_ep->ep == ep) { +- update_bus_bw(sch_bw, sch_ep, 0); +- list_del(&sch_ep->endpoint); +- if (is_fs_or_ls(udev->speed)) { +- list_del(&sch_ep->tt_endpoint); +- drop_tt(udev); +- } +- kfree(sch_ep); ++ destroy_sch_ep(udev, sch_bw, sch_ep); + break; + } + } + } + EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); ++ ++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; ++ struct 
mu3h_sch_bw_info *sch_bw; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; ++ int bw_index, ret; ++ ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); ++ ++ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) { ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ ++ ret = check_sch_bw(udev, sch_bw, sch_ep); ++ if (ret) { ++ xhci_err(xhci, "Not enough bandwidth!\n"); ++ return -ENOSPC; ++ } ++ } ++ ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { ++ struct xhci_ep_ctx *ep_ctx; ++ struct usb_host_endpoint *ep = sch_ep->ep; ++ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc); ++ ++ bw_index = get_bw_index(xhci, udev, ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ ++ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); ++ ++ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); ++ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) ++ | EP_BCSCOUNT(sch_ep->cs_count) ++ | EP_BBM(sch_ep->burst_mode)); ++ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) ++ | EP_BREPEAT(sch_ep->repeat)); ++ ++ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", ++ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, ++ sch_ep->offset, sch_ep->repeat); ++ } ++ ++ return xhci_check_bandwidth(hcd, udev); ++} ++EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth); ++ ++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct mu3h_sch_bw_info *sch_bw; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; ++ int bw_index; ++ ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); ++ ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ destroy_sch_ep(udev, sch_bw, sch_ep); ++ } ++ ++ xhci_reset_bandwidth(hcd, udev); ++} ++EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth); +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c +index 85f1ff0399a9c..09b67219fd146 100644 +--- a/drivers/usb/host/xhci-mtk.c ++++ b/drivers/usb/host/xhci-mtk.c +@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable) + static int xhci_mtk_setup(struct usb_hcd *hcd); + static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { + .reset = xhci_mtk_setup, ++ .check_bandwidth = xhci_mtk_check_bandwidth, ++ .reset_bandwidth = xhci_mtk_reset_bandwidth, + }; + + static struct hc_driver __read_mostly xhci_mtk_hc_driver; +diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h +index 5ac458b7d2e0e..734c5513aa1bf 100644 +--- a/drivers/usb/host/xhci-mtk.h ++++ b/drivers/usb/host/xhci-mtk.h +@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info { + * @ep_type: endpoint type + * @maxpkt: max packet size of endpoint + * @ep: address of usb_host_endpoint struct ++ * @allocated: the bandwidth is aready allocated from bus_bw + * @offset: which uframe of the interval that transfer should be + * scheduled first time within the interval + * @repeat: the time gap between two uframes that transfers are +@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info { + u32 ep_type; + u32 maxpkt; + void *ep; ++ bool allocated; + /* + * mtk xHCI scheduling information put into reserved DWs + * in ep context +@@ -131,6 +133,7 @@ struct xhci_hcd_mtk { + struct device *dev; + struct usb_hcd *hcd; + struct mu3h_sch_bw_info *sch_array; ++ struct list_head 
bw_ep_chk_list; + struct mu3c_ippc_regs __iomem *ippc_regs; + bool has_ippc; + int num_u2_ports; +@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); + void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep); ++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); ++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); + + #else + static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, +@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, + { + } + ++static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, ++ struct usb_device *udev) ++{ ++ return 0; ++} ++ ++static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, ++ struct usb_device *udev) ++{ ++} + #endif + + #endif /* _XHCI_MTK_H_ */ +diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c +index 60651a50770f9..f27d5c2c42f31 100644 +--- a/drivers/usb/host/xhci-mvebu.c ++++ b/drivers/usb/host/xhci-mvebu.c +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + return 0; + } + ++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) ++{ ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct device *dev = hcd->self.controller; ++ struct phy *phy; ++ int ret; ++ ++ /* Old bindings miss the PHY handle */ ++ phy = of_phy_get(dev->of_node, "usb3-phy"); ++ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) ++ return -EPROBE_DEFER; ++ else if (IS_ERR(phy)) ++ goto phy_out; ++ ++ ret = phy_init(phy); ++ if (ret) ++ goto phy_put; ++ ++ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); ++ if (ret) ++ goto phy_exit; ++ ++ ret = phy_power_on(phy); ++ if (ret == -EOPNOTSUPP) { ++ /* Skip initializatin of XHCI PHY when it is unsupported by firmware */ ++ dev_warn(dev, "PHY unsupported by firmware\n"); ++ xhci->quirks |= XHCI_SKIP_PHY_INIT; ++ } ++ if (ret) ++ goto phy_exit; ++ ++ phy_power_off(phy); ++phy_exit: ++ phy_exit(phy); ++phy_put: ++ phy_put(phy); ++phy_out: ++ ++ return 0; ++} ++ + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) + { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); +diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h +index ca0a3a5721dd7..74b4d21a498a0 100644 +--- a/drivers/usb/host/xhci-mvebu.h ++++ b/drivers/usb/host/xhci-mvebu.h +@@ -12,6 +12,7 @@ struct usb_hcd; + + #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) + int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); ++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); + #else + static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) +@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + return 0; + } + ++static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) ++{ ++ return 0; ++} ++ + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) + { + return 0; +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 52c625c023410..84cfa85442852 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd) + priv->plat_start(hcd); + } + ++static int xhci_priv_plat_setup(struct usb_hcd *hcd) ++{ ++ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); ++ ++ if (!priv->plat_setup) ++ return 0; ++ ++ 
return priv->plat_setup(hcd); ++} ++ + static int xhci_priv_init_quirk(struct usb_hcd *hcd) + { + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +@@ -101,6 +111,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { + }; + + static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { ++ .plat_setup = xhci_mvebu_a3700_plat_setup, + .init_quirk = xhci_mvebu_a3700_init_quirk, + }; + +@@ -163,6 +174,8 @@ static int xhci_plat_probe(struct platform_device *pdev) + struct usb_hcd *hcd; + int ret; + int irq; ++ struct xhci_plat_priv *priv = NULL; ++ + + if (usb_disabled()) + return -ENODEV; +@@ -257,8 +270,7 @@ static int xhci_plat_probe(struct platform_device *pdev) + + priv_match = of_device_get_match_data(&pdev->dev); + if (priv_match) { +- struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +- ++ priv = hcd_to_xhci_priv(hcd); + /* Just copy data for now */ + if (priv_match) + *priv = *priv_match; +@@ -307,6 +319,16 @@ static int xhci_plat_probe(struct platform_device *pdev) + + hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); + xhci->shared_hcd->tpl_support = hcd->tpl_support; ++ ++ if (priv) { ++ ret = xhci_priv_plat_setup(hcd); ++ if (ret) ++ goto disable_usb_phy; ++ } ++ ++ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) ++ hcd->skip_phy_initialization = 1; ++ + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (ret) + goto disable_usb_phy; +diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h +index 5681723fc9cd7..b7749151bdfb8 100644 +--- a/drivers/usb/host/xhci-plat.h ++++ b/drivers/usb/host/xhci-plat.h +@@ -13,6 +13,7 @@ + struct xhci_plat_priv { + const char *firmware_name; + unsigned long long quirks; ++ int (*plat_setup)(struct usb_hcd *); + void (*plat_start)(struct usb_hcd *); + int (*init_quirk)(struct usb_hcd *); + int (*resume_quirk)(struct usb_hcd *); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 52e156c018042..900ea91fb3c6b 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -695,11 +695,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ +- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, +- seg->bounce_len, seg->bounce_offs); +- if (len != seg->bounce_len) +- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", +- len, seg->bounce_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, ++ seg->bounce_len, seg->bounce_offs); ++ if (len != seg->bounce_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", ++ len, seg->bounce_len); ++ } else { ++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, ++ seg->bounce_len); ++ } + seg->bounce_len = 0; + seg->bounce_offs = 0; + } +@@ -3263,12 +3268,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, + + /* create a max max_pkt sized bounce buffer pointed to by last trb */ + if (usb_urb_dir_out(urb)) { +- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, +- seg->bounce_buf, new_buff_len, enqd_len); +- if (len != new_buff_len) +- xhci_warn(xhci, +- "WARN Wrong bounce buffer write length: %zu != %d\n", +- len, new_buff_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, ++ seg->bounce_buf, new_buff_len, enqd_len); ++ if (len != new_buff_len) 
++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", ++ len, new_buff_len); ++ } else { ++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); ++ } ++ + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, + max_pkt, DMA_TO_DEVICE); + } else { +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 70aa3055c41e7..91330517444e7 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -2861,7 +2861,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, + * else should be touching the xhci->devs[slot_id] structure, so we + * don't need to take the xhci->lock for manipulating that. + */ +-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + { + int i; + int ret = 0; +@@ -2959,7 +2959,7 @@ command_cleanup: + return ret; + } + +-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + { + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; +@@ -5380,6 +5380,10 @@ void xhci_init_driver(struct hc_driver *drv, + drv->reset = over->reset; + if (over->start) + drv->start = over->start; ++ if (over->check_bandwidth) ++ drv->check_bandwidth = over->check_bandwidth; ++ if (over->reset_bandwidth) ++ drv->reset_bandwidth = over->reset_bandwidth; + } + } + EXPORT_SYMBOL_GPL(xhci_init_driver); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index b483317bcb17b..1ad1d6e9e9979 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1873,6 +1873,7 @@ struct xhci_hcd { + #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) + #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) + #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) ++#define XHCI_SKIP_PHY_INIT BIT_ULL(37) + #define XHCI_DISABLE_SPARSE BIT_ULL(38) + + unsigned int num_active_eps; +@@ -1911,6 +1912,8 @@ struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *hcd); + int (*start)(struct usb_hcd *hcd); ++ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); ++ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + }; + + #define XHCI_CFC_DELAY 10 +@@ -2063,6 +2066,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); + void xhci_shutdown(struct usb_hcd *hcd); + void xhci_init_driver(struct hc_driver *drv, + const struct xhci_driver_overrides *over); ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); + int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); + int xhci_ext_cap_init(struct xhci_hcd *xhci); + +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c +index 05cdad13933b1..cfc16943979d5 100644 +--- a/drivers/usb/renesas_usbhs/fifo.c ++++ b/drivers/usb/renesas_usbhs/fifo.c +@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) + } + + usbhs_pipe_clear_without_sequence(pipe, 0, 0); ++ usbhs_pipe_running(pipe, 0); + + __usbhsf_pkt_del(pkt); + } +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index a90801ef00554..361a2e3ccad8d 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { 
USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ ++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ +@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ ++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index fd41b07b5aaf1..f49eae18500cc 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); + #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 + #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 + #define CINTERION_PRODUCT_CLS8 0x00b0 ++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 ++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 + + /* Olivetti products */ + #define OLIVETTI_VENDOR_ID 0x0b3c +@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), ++ .driver_info = RSVD(3)}, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), ++ .driver_info = RSVD(0)}, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = RSVD(4) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), +diff --git a/fs/afs/main.c b/fs/afs/main.c +index c9c45d7078bd1..5cd26af2464c9 100644 +--- a/fs/afs/main.c ++++ b/fs/afs/main.c +@@ -186,7 +186,7 @@ static int __init afs_init(void) + goto error_cache; + #endif + +- ret = register_pernet_subsys(&afs_net_ops); ++ ret = register_pernet_device(&afs_net_ops); + if (ret < 0) + goto error_net; + +@@ -206,7 +206,7 @@ static int __init afs_init(void) + error_proc: + afs_fs_exit(); + error_fs: +- unregister_pernet_subsys(&afs_net_ops); ++ unregister_pernet_device(&afs_net_ops); + error_net: + #ifdef CONFIG_AFS_FSCACHE + fscache_unregister_netfs(&afs_cache_netfs); +@@ -237,7 +237,7 @@ static void __exit afs_exit(void) + + proc_remove(afs_proc_symlink); + afs_fs_exit(); +- unregister_pernet_subsys(&afs_net_ops); ++ unregister_pernet_device(&afs_net_ops); + #ifdef CONFIG_AFS_FSCACHE + fscache_unregister_netfs(&afs_cache_netfs); + #endif +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 5a35850ccb1ab..9ae9a514676c3 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -738,6 +738,7 @@ static int + cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + { + struct inode *inode; ++ int rc; + + if (flags & LOOKUP_RCU) + return -ECHILD; +@@ -747,8 +748,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int 
flags) + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) + CIFS_I(inode)->time = 0; /* force reval */ + +- if (cifs_revalidate_dentry(direntry)) +- return 0; ++ rc = cifs_revalidate_dentry(direntry); ++ if (rc) { ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); ++ switch (rc) { ++ case -ENOENT: ++ case -ESTALE: ++ /* ++ * Those errors mean the dentry is invalid ++ * (file was deleted or recreated) ++ */ ++ return 0; ++ default: ++ /* ++ * Otherwise some unexpected error happened ++ * report it as-is to VFS layer ++ */ ++ return rc; ++ } ++ } + else { + /* + * If the inode wasn't known to be a dfs entry when +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index 2482978f09486..739556e385be8 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -227,7 +227,7 @@ struct smb2_negotiate_req { + __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */ + __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */ + __le16 Reserved2; +- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */ ++ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ + } __packed; + + /* Dialects */ +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index 4ffbf8f965814..eab7940bfebef 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -659,10 +659,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num, + spin_lock(&server->req_lock); + if (*credits < num) { + /* +- * Return immediately if not too many requests in flight since +- * we will likely be stuck on waiting for credits. ++ * If the server is tight on resources or just gives us less ++ * credits for other reasons (e.g. requests are coming out of ++ * order and the server delays granting more credits until it ++ * processes a missing mid) and we exhausted most available ++ * credits there may be situations when we try to send ++ * a compound request but we don't have enough credits. At this ++ * point the client needs to decide if it should wait for ++ * additional credits or fail the request. If at least one ++ * request is in flight there is a high probability that the ++ * server will return enough credits to satisfy this compound ++ * request. ++ * ++ * Return immediately if no requests in flight since we will be ++ * stuck on waiting for credits. 
+ */ +- if (server->in_flight < num - *credits) { ++ if (server->in_flight == 0) { + spin_unlock(&server->req_lock); + return -ENOTSUPP; + } +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 5fff7cb3582f0..cf3af2140c3d8 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -675,9 +675,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + ++ set_page_huge_active(page); + /* + * unlock_page because locked by add_to_page_cache() +- * page_put due to reference from alloc_huge_page() ++ * put_page() due to reference from alloc_huge_page() + */ + unlock_page(page); + put_page(page); +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c +index 29abdb1d3b5c6..6509ec3cb3730 100644 +--- a/fs/overlayfs/dir.c ++++ b/fs/overlayfs/dir.c +@@ -940,8 +940,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect) + + buflen -= thislen; + memcpy(&buf[buflen], name, thislen); +- tmp = dget_dlock(d->d_parent); + spin_unlock(&d->d_lock); ++ tmp = dget_parent(d); + + dput(d); + d = tmp; +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index 8a03f392f3680..0e080ba5efbcc 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -590,6 +590,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, + } + #endif + ++void set_page_huge_active(struct page *page); ++ + #else /* CONFIG_HUGETLB_PAGE */ + struct hstate {}; + +diff --git a/include/linux/msi.h b/include/linux/msi.h +index 8ad679e9d9c04..d695e2eb2092d 100644 +--- a/include/linux/msi.h ++++ b/include/linux/msi.h +@@ -139,6 +139,12 @@ struct msi_desc { + list_for_each_entry((desc), dev_to_msi_list((dev)), list) + #define for_each_msi_entry_safe(desc, tmp, dev) \ + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) ++#define for_each_msi_vector(desc, __irq, dev) \ ++ for_each_msi_entry((desc), (dev)) \ ++ if ((desc)->irq) \ ++ for (__irq = (desc)->irq; \ ++ __irq < ((desc)->irq + (desc)->nvec_used); \ ++ __irq++) + + #ifdef CONFIG_IRQ_MSI_IOMMU + static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index 3d03756e10699..b2ceec7b280d4 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -1158,7 +1158,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + old = *pold; + *pold = new; + if (old != NULL) +- qdisc_tree_flush_backlog(old); ++ qdisc_purge_queue(old); + sch_tree_unlock(sch); + + return old; +diff --git a/init/init_task.c b/init/init_task.c +index df7041be96fca..5d8359c44564a 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -171,7 +171,8 @@ struct task_struct init_task + .lockdep_recursion = 0, + #endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER +- .ret_stack = NULL, ++ .ret_stack = NULL, ++ .tracing_graph_pause = ATOMIC_INIT(0), + #endif + #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION) + .trace_recursion = 0, +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c +index 5a8b4dfdb1419..c2f0aa818b7af 100644 +--- a/kernel/bpf/cgroup.c ++++ b/kernel/bpf/cgroup.c +@@ -1109,6 +1109,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + goto out; + } + ++ if (ctx.optlen < 0) { ++ ret = -EFAULT; ++ goto out; ++ } ++ + if (copy_from_user(ctx.optval, optval, + min(ctx.optlen, max_optlen)) != 0) { + ret = -EFAULT; +@@ -1126,7 +1131,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock 
*sk, int level, + goto out; + } + +- if (ctx.optlen > max_optlen) { ++ if (ctx.optlen > max_optlen || ctx.optlen < 0) { + ret = -EFAULT; + goto out; + } +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c +index eb95f6106a1ee..5d3da0db092ff 100644 +--- a/kernel/irq/msi.c ++++ b/kernel/irq/msi.c +@@ -437,22 +437,22 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + + can_reserve = msi_check_reservation_mode(domain, info, dev); + +- for_each_msi_entry(desc, dev) { +- virq = desc->irq; +- if (desc->nvec_used == 1) +- dev_dbg(dev, "irq %d for MSI\n", virq); +- else ++ /* ++ * This flag is set by the PCI layer as we need to activate ++ * the MSI entries before the PCI layer enables MSI in the ++ * card. Otherwise the card latches a random msi message. ++ */ ++ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) ++ goto skip_activate; ++ ++ for_each_msi_vector(desc, i, dev) { ++ if (desc->irq == i) { ++ virq = desc->irq; + dev_dbg(dev, "irq [%d-%d] for MSI\n", + virq, virq + desc->nvec_used - 1); +- /* +- * This flag is set by the PCI layer as we need to activate +- * the MSI entries before the PCI layer enables MSI in the +- * card. Otherwise the card latches a random msi message. +- */ +- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) +- continue; ++ } + +- irq_data = irq_domain_get_irq_data(domain, desc->irq); ++ irq_data = irq_domain_get_irq_data(domain, i); + if (!can_reserve) { + irqd_clr_can_reserve(irq_data); + if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) +@@ -463,28 +463,24 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + goto cleanup; + } + ++skip_activate: + /* + * If these interrupts use reservation mode, clear the activated bit + * so request_irq() will assign the final vector. + */ + if (can_reserve) { +- for_each_msi_entry(desc, dev) { +- irq_data = irq_domain_get_irq_data(domain, desc->irq); ++ for_each_msi_vector(desc, i, dev) { ++ irq_data = irq_domain_get_irq_data(domain, i); + irqd_clr_activated(irq_data); + } + } + return 0; + + cleanup: +- for_each_msi_entry(desc, dev) { +- struct irq_data *irqd; +- +- if (desc->irq == virq) +- break; +- +- irqd = irq_domain_get_irq_data(domain, desc->irq); +- if (irqd_is_activated(irqd)) +- irq_domain_deactivate_irq(irqd); ++ for_each_msi_vector(desc, i, dev) { ++ irq_data = irq_domain_get_irq_data(domain, i); ++ if (irqd_is_activated(irq_data)) ++ irq_domain_deactivate_irq(irq_data); + } + msi_domain_free_irqs(domain, dev); + return ret; +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 283c8b01ce789..26ae92c12fc22 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1972,6 +1972,10 @@ int register_kretprobe(struct kretprobe *rp) + if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) + return -EINVAL; + ++ /* If only rp->kp.addr is specified, check reregistering kprobes */ ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) ++ return -EINVAL; ++ + if (kretprobe_blacklist_size) { + addr = kprobe_addr(&rp->kp); + if (IS_ERR(addr)) +diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c +index 7950a0356042a..888cd00174fe3 100644 +--- a/kernel/trace/fgraph.c ++++ b/kernel/trace/fgraph.c +@@ -367,7 +367,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) + } + + if (t->ret_stack == NULL) { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->curr_ret_stack = -1; + t->curr_ret_depth = -1; +@@ -462,7 +461,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + static void + 
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) + { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visible before we add the ret_stack */ +diff --git a/mm/compaction.c b/mm/compaction.c +index 92470625f0b1e..88c3f6bad1aba 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -1276,7 +1276,7 @@ fast_isolate_freepages(struct compact_control *cc) + { + unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); + unsigned int nr_scanned = 0; +- unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; ++ unsigned long low_pfn, min_pfn, highest = 0; + unsigned long nr_isolated = 0; + unsigned long distance; + struct page *page = NULL; +@@ -1321,6 +1321,7 @@ fast_isolate_freepages(struct compact_control *cc) + struct page *freepage; + unsigned long flags; + unsigned int order_scanned = 0; ++ unsigned long high_pfn = 0; + + if (!area->nr_free) + continue; +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 11aa763a31440..7bbf419bb86d6 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2306,7 +2306,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + { + spinlock_t *ptl; + struct mmu_notifier_range range; +- bool was_locked = false; ++ bool do_unlock_page = false; + pmd_t _pmd; + + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, +@@ -2322,7 +2322,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + VM_BUG_ON(freeze && !page); + if (page) { + VM_WARN_ON_ONCE(!PageLocked(page)); +- was_locked = true; + if (page != pmd_page(*pmd)) + goto out; + } +@@ -2331,19 +2330,29 @@ repeat: + if (pmd_trans_huge(*pmd)) { + if (!page) { + page = pmd_page(*pmd); +- if (unlikely(!trylock_page(page))) { +- get_page(page); +- _pmd = *pmd; +- spin_unlock(ptl); +- lock_page(page); +- spin_lock(ptl); +- if (unlikely(!pmd_same(*pmd, _pmd))) { +- unlock_page(page); ++ /* ++ * An anonymous page must be locked, to ensure that a ++ * concurrent reuse_swap_page() sees stable mapcount; ++ * but reuse_swap_page() is not used on shmem or file, ++ * and page lock must not be taken when zap_pmd_range() ++ * calls __split_huge_pmd() while i_mmap_lock is held. ++ */ ++ if (PageAnon(page)) { ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } + put_page(page); +- page = NULL; +- goto repeat; + } +- put_page(page); ++ do_unlock_page = true; + } + } + if (PageMlocked(page)) +@@ -2353,7 +2362,7 @@ repeat: + __split_huge_pmd_locked(vma, pmd, range.start, freeze); + out: + spin_unlock(ptl); +- if (!was_locked && page) ++ if (do_unlock_page) + unlock_page(page); + /* + * No need to double call mmu_notifier->invalidate_range() callback. 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 3bc33fa838177..d5b03b9262d4f 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -71,6 +71,21 @@ DEFINE_SPINLOCK(hugetlb_lock); + static int num_fault_mutexes; + struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; + ++static inline bool PageHugeFreed(struct page *head) ++{ ++ return page_private(head + 4) == -1UL; ++} ++ ++static inline void SetPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, -1UL); ++} ++ ++static inline void ClearPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, 0); ++} ++ + /* Forward declaration */ + static int hugetlb_acct_memory(struct hstate *h, long delta); + +@@ -869,6 +884,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) + list_move(&page->lru, &h->hugepage_freelists[nid]); + h->free_huge_pages++; + h->free_huge_pages_node[nid]++; ++ SetPageHugeFreed(page); + } + + static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) +@@ -886,6 +902,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) + return NULL; + list_move(&page->lru, &h->hugepage_activelist); + set_page_refcounted(page); ++ ClearPageHugeFreed(page); + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; + return page; +@@ -1217,12 +1234,11 @@ struct hstate *size_to_hstate(unsigned long size) + */ + bool page_huge_active(struct page *page) + { +- VM_BUG_ON_PAGE(!PageHuge(page), page); +- return PageHead(page) && PagePrivate(&page[1]); ++ return PageHeadHuge(page) && PagePrivate(&page[1]); + } + + /* never called for tail page */ +-static void set_page_huge_active(struct page *page) ++void set_page_huge_active(struct page *page) + { + VM_BUG_ON_PAGE(!PageHeadHuge(page), page); + SetPagePrivate(&page[1]); +@@ -1375,6 +1391,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) + set_hugetlb_cgroup(page, NULL); + h->nr_huge_pages++; + h->nr_huge_pages_node[nid]++; ++ ClearPageHugeFreed(page); + spin_unlock(&hugetlb_lock); + } + +@@ -1602,6 +1619,7 @@ int dissolve_free_huge_page(struct page *page) + { + int rc = -EBUSY; + ++retry: + /* Not to disrupt normal path by vainly holding hugetlb_lock */ + if (!PageHuge(page)) + return 0; +@@ -1618,6 +1636,26 @@ int dissolve_free_huge_page(struct page *page) + int nid = page_to_nid(head); + if (h->free_huge_pages - h->resv_huge_pages == 0) + goto out; ++ ++ /* ++ * We should make sure that the page is already on the free list ++ * when it is dissolved. ++ */ ++ if (unlikely(!PageHugeFreed(head))) { ++ spin_unlock(&hugetlb_lock); ++ cond_resched(); ++ ++ /* ++ * Theoretically, we should return -EBUSY when we ++ * encounter this race. In fact, we have a chance ++ * to successfully dissolve the page if we do a ++ * retry. Because the race window is quite small. ++ * If we seize this opportunity, it is an optimization ++ * for increasing the success rate of dissolving page. ++ */ ++ goto retry; ++ } ++ + /* + * Move PageHWPoison flag from head page to the raw error page, + * which makes any subpages rather than the error page reusable. 
+@@ -5136,9 +5174,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) + { + bool ret = true; + +- VM_BUG_ON_PAGE(!PageHead(page), page); + spin_lock(&hugetlb_lock); +- if (!page_huge_active(page) || !get_page_unless_zero(page)) { ++ if (!PageHeadHuge(page) || !page_huge_active(page) || ++ !get_page_unless_zero(page)) { + ret = false; + goto unlock; + } +diff --git a/mm/memblock.c b/mm/memblock.c +index c4b16cae2bc9b..11f6ae37d6699 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -257,14 +257,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, + * + * Find @size free area aligned to @align in the specified range and node. + * +- * When allocation direction is bottom-up, the @start should be greater +- * than the end of the kernel image. Otherwise, it will be trimmed. The +- * reason is that we want the bottom-up allocation just near the kernel +- * image so it is highly likely that the allocated memory and the kernel +- * will reside in the same node. +- * +- * If bottom-up allocation failed, will try to allocate memory top-down. +- * + * Return: + * Found address on success, 0 on failure. + */ +@@ -273,8 +265,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + phys_addr_t end, int nid, + enum memblock_flags flags) + { +- phys_addr_t kernel_end, ret; +- + /* pump up @end */ + if (end == MEMBLOCK_ALLOC_ACCESSIBLE || + end == MEMBLOCK_ALLOC_KASAN) +@@ -283,40 +273,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + /* avoid allocating the first page */ + start = max_t(phys_addr_t, start, PAGE_SIZE); + end = max(start, end); +- kernel_end = __pa_symbol(_end); +- +- /* +- * try bottom-up allocation only when bottom-up mode +- * is set and @end is above the kernel image. +- */ +- if (memblock_bottom_up() && end > kernel_end) { +- phys_addr_t bottom_up_start; +- +- /* make sure we will allocate above the kernel */ +- bottom_up_start = max(start, kernel_end); + +- /* ok, try bottom-up allocation first */ +- ret = __memblock_find_range_bottom_up(bottom_up_start, end, +- size, align, nid, flags); +- if (ret) +- return ret; +- +- /* +- * we always limit bottom-up allocation above the kernel, +- * but top-down allocation doesn't have the limit, so +- * retrying top-down allocation may succeed when bottom-up +- * allocation failed. +- * +- * bottom-up allocation is expected to be fail very rarely, +- * so we use WARN_ONCE() here to see the stack trace if +- * fail happens. 
+- */ +- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), +- "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); +- } +- +- return __memblock_find_range_top_down(start, end, size, align, nid, +- flags); ++ if (memblock_bottom_up()) ++ return __memblock_find_range_bottom_up(start, end, size, align, ++ nid, flags); ++ else ++ return __memblock_find_range_top_down(start, end, size, align, ++ nid, flags); + } + + /** +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 6c270fce200f4..7080d708b7d08 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1244,13 +1244,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, + old = neigh->nud_state; + err = -EPERM; + +- if (!(flags & NEIGH_UPDATE_F_ADMIN) && +- (old & (NUD_NOARP | NUD_PERMANENT))) +- goto out; + if (neigh->dead) { + NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); ++ new = old; + goto out; + } ++ if (!(flags & NEIGH_UPDATE_F_ADMIN) && ++ (old & (NUD_NOARP | NUD_PERMANENT))) ++ goto out; + + ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify); + +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index ca525cf681a4e..f64d1743b86d6 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) + } + + dev->needed_headroom = t_hlen + hlen; +- mtu -= (dev->hard_header_len + t_hlen); ++ mtu -= t_hlen; + + if (mtu < IPV4_MIN_MTU) + mtu = IPV4_MIN_MTU; +@@ -347,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, + nt = netdev_priv(dev); + t_hlen = nt->hlen + sizeof(struct iphdr); + dev->min_mtu = ETH_MIN_MTU; +- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; ++ dev->max_mtu = IP_MAX_MTU - t_hlen; + ip_tunnel_add(itn, nt); + return nt; + +@@ -494,11 +494,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, + int mtu; + + tunnel_hlen = md ? tunnel_hlen : tunnel->hlen; +- pkt_size = skb->len - tunnel_hlen - dev->hard_header_len; ++ pkt_size = skb->len - tunnel_hlen; + + if (df) +- mtu = dst_mtu(&rt->dst) - dev->hard_header_len +- - sizeof(struct iphdr) - tunnel_hlen; ++ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen); + else + mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu;
+
+@@ -964,7 +963,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+-	int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
++	int max_mtu = IP_MAX_MTU - t_hlen;
+
+ 	if (new_mtu < ETH_MIN_MTU)
+ 		return -EINVAL;
+@@ -1141,10 +1140,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
+
+ 	mtu = ip_tunnel_bind_dev(dev);
+ 	if (tb[IFLA_MTU]) {
+-		unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
++		unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
+
+-		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
+-			    (unsigned int)(max - sizeof(struct iphdr)));
++		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
+ 	}
+
+ 	err = dev_set_mtu(dev, mtu);
+diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
+index 7a4d0715d1c32..a966d29c772d9 100644
+--- a/net/lapb/lapb_out.c
++++ b/net/lapb/lapb_out.c
+@@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb)
+ 		skb = skb_dequeue(&lapb->write_queue);
+
+ 		do {
+-			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
++			skbn = skb_copy(skb, GFP_ATOMIC);
++			if (!skbn) {
+ 				skb_queue_head(&lapb->write_queue, skb);
+ 				break;
+ 			}
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index c9a8a2433e8ac..48322e45e7ddb 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local,
+ 	} else if (old_state == IEEE80211_STA_AUTH &&
+ 		   new_state == IEEE80211_STA_ASSOC) {
+ 		ret = drv_sta_add(local, sdata, &sta->sta);
+-		if (ret == 0)
++		if (ret == 0) {
+ 			sta->uploaded = true;
++			if (rcu_access_pointer(sta->sta.rates))
++				drv_sta_rate_tbl_update(local, sdata, &sta->sta);
++		}
+ 	} else if (old_state == IEEE80211_STA_ASSOC &&
+ 		   new_state == IEEE80211_STA_AUTH) {
+ 		drv_sta_remove(local, sdata, &sta->sta);
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index b051f125d3af2..9841db84bce0a 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -934,7 +934,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
+ 	if (old)
+ 		kfree_rcu(old, rcu_head);
+
+-	drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
++	if (sta->uploaded)
++		drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+
+ 	ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
+
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index 2921fc2767134..9bacec6653bac 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -976,7 +976,7 @@ static int __init af_rxrpc_init(void)
+ 		goto error_security;
+ 	}
+
+-	ret = register_pernet_subsys(&rxrpc_net_ops);
++	ret = register_pernet_device(&rxrpc_net_ops);
+ 	if (ret)
+ 		goto error_pernet;
+
+@@ -1021,7 +1021,7 @@ error_key_type:
+ error_sock:
+ 	proto_unregister(&rxrpc_proto);
+ error_proto:
+-	unregister_pernet_subsys(&rxrpc_net_ops);
++	unregister_pernet_device(&rxrpc_net_ops);
+ error_pernet:
+ 	rxrpc_exit_security();
+ error_security:
+@@ -1043,7 +1043,7 @@ static void __exit af_rxrpc_exit(void)
+ 	unregister_key_type(&key_type_rxrpc);
+ 	sock_unregister(PF_RXRPC);
+ 	proto_unregister(&rxrpc_proto);
+-	unregister_pernet_subsys(&rxrpc_net_ops);
++	unregister_pernet_device(&rxrpc_net_ops);
+ 	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
+ 	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
+

From 940541278206c39c54ce55dd7ca32ef98085fee2 Mon Sep 17 00:00:00 2001
From: Igor Pecovnik
Date: Thu,
 11 Feb 2021 12:35:41 +0100
Subject: [PATCH 3/7] sunxi - add upstream patches

---
 config/kernel/linux-sunxi-current.config   | 2 +-
 config/kernel/linux-sunxi64-current.config | 5 ++++-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/config/kernel/linux-sunxi-current.config b/config/kernel/linux-sunxi-current.config
index d456c364b..d22788973 100644
--- a/config/kernel/linux-sunxi-current.config
+++ b/config/kernel/linux-sunxi-current.config
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/arm 5.10.9 Kernel Configuration
+# Linux/arm 5.10.15 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="arm-linux-gnueabihf-gcc (GNU Toolchain for the A-profile Architecture 8.3-2019.03 (arm-rel-8.36)) 8.3.0"
 CONFIG_CC_IS_GCC=y
diff --git a/config/kernel/linux-sunxi64-current.config b/config/kernel/linux-sunxi64-current.config
index 2b79f3ef8..66cd23d2f 100644
--- a/config/kernel/linux-sunxi64-current.config
+++ b/config/kernel/linux-sunxi64-current.config
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/arm64 5.10.4 Kernel Configuration
+# Linux/arm64 5.10.15 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="aarch64-linux-gnu-gcc (GNU Toolchain for the A-profile Architecture 8.3-2019.03 (arm-rel-8.36)) 8.3.0"
 CONFIG_CC_IS_GCC=y
@@ -1850,6 +1850,7 @@ CONFIG_REGMAP_SPMI=m
 CONFIG_REGMAP_W1=m
 CONFIG_REGMAP_MMIO=y
 CONFIG_REGMAP_IRQ=y
+CONFIG_REGMAP_SCCB=m
 CONFIG_DMA_SHARED_BUFFER=y
 # CONFIG_DMA_FENCE_TRACE is not set
 CONFIG_GENERIC_ARCH_TOPOLOGY=y
@@ -4113,6 +4114,8 @@ CONFIG_VIDEO_CX25840=m
 #
 # Camera sensor devices
 #
+CONFIG_VIDEO_APTINA_PLL=m
+CONFIG_VIDEO_SMIAPP_PLL=m
 CONFIG_VIDEO_HI556=m
 CONFIG_VIDEO_IMX214=m
 CONFIG_VIDEO_IMX219=m

From 8e4b84428dcd2f4ad19235c5fd45673e4494bc11 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Igor=20Pe=C4=8Dovnik?=
Date: Thu, 11 Feb 2021 12:37:54 +0100
Subject: [PATCH 4/7] Enable hardware PRNG/TRNG/SHA on sun8i-ce platform
 (#2616)

* Enable hardware PRNG/TRNG/SHA on sun8i-ce platform

* Update configs - change sunxi crypto to boolean
---
 config/kernel/linux-sunxi-current.config   | 12 +++---
 config/kernel/linux-sunxi-dev.config       | 36 +++++++++++++-----
 config/kernel/linux-sunxi64-current.config | 34 +++++++++++------
 config/kernel/linux-sunxi64-dev.config     | 44 ++++++++++++++--------
 4 files changed, 83 insertions(+), 43 deletions(-)

diff --git a/config/kernel/linux-sunxi-current.config b/config/kernel/linux-sunxi-current.config
index d22788973..c41c31b53 100644
--- a/config/kernel/linux-sunxi-current.config
+++ b/config/kernel/linux-sunxi-current.config
@@ -7504,15 +7504,15 @@ CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LIB_SHA256=y
 CONFIG_CRYPTO_HW=y
 CONFIG_CRYPTO_DEV_ALLWINNER=y
-CONFIG_CRYPTO_DEV_SUN4I_SS=m
-# CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG is not set
-CONFIG_CRYPTO_DEV_SUN8I_CE=m
+CONFIG_CRYPTO_DEV_SUN4I_SS=y
+CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE=y
 # CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_HASH is not set
+CONFIG_CRYPTO_DEV_SUN8I_CE_HASH=y
 CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG=y
 CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG=y
-CONFIG_CRYPTO_DEV_SUN8I_SS=m
-# CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG is not set
+CONFIG_CRYPTO_DEV_SUN8I_SS=y
+# CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG is not set
 CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG=y
 CONFIG_CRYPTO_DEV_SUN8I_SS_HASH=y
 # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
diff --git a/config/kernel/linux-sunxi-dev.config b/config/kernel/linux-sunxi-dev.config
index 7a778d0ef..ebf3f8989 100644
--- a/config/kernel/linux-sunxi-dev.config
+++ b/config/kernel/linux-sunxi-dev.config
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/arm 5.10.3 Kernel Configuration
+# Linux/arm 5.10.13 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="arm-linux-gnueabihf-gcc (GNU Toolchain for the A-profile Architecture 8.3-2019.03 (arm-rel-8.36)) 8.3.0"
 CONFIG_CC_IS_GCC=y
@@ -2640,7 +2640,6 @@ CONFIG_WLAN_VENDOR_TI=y
 CONFIG_RTL8723DU=m
 CONFIG_RTL8723DS=m
 CONFIG_RTL8822BU=m
-CONFIG_RTL8188EU=m
 CONFIG_RTL8821CU=m
 CONFIG_WLAN_VENDOR_XRADIO=m
 CONFIG_XRADIO_NON_POWER_OF_TWO_BLOCKSIZES=y
@@ -2997,7 +2996,7 @@ CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 CONFIG_VIRTIO_CONSOLE=m
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_IPMB_DEVICE_INTERFACE is not set
-CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM=y
 # CONFIG_HW_RANDOM_TIMERIOMEM is not set
 # CONFIG_HW_RANDOM_BA431 is not set
 CONFIG_HW_RANDOM_VIRTIO=m
@@ -3657,7 +3656,6 @@ CONFIG_MFD_CPCAP=m
 # CONFIG_MFD_RN5T618 is not set
 # CONFIG_MFD_SEC_CORE is not set
 # CONFIG_MFD_SI476X_CORE is not set
-# CONFIG_MFD_SL28CPLD is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_SKY81452 is not set
 # CONFIG_ABX500_CORE is not set
@@ -7091,6 +7089,24 @@ CONFIG_EROFS_FS_XATTR=y
 CONFIG_EROFS_FS_POSIX_ACL=y
 CONFIG_EROFS_FS_SECURITY=y
 # CONFIG_EROFS_FS_ZIP is not set
+CONFIG_AUFS_FS=m
+CONFIG_AUFS_BRANCH_MAX_127=y
+# CONFIG_AUFS_BRANCH_MAX_511 is not set
+# CONFIG_AUFS_BRANCH_MAX_1023 is not set
+# CONFIG_AUFS_BRANCH_MAX_32767 is not set
+CONFIG_AUFS_SBILIST=y
+# CONFIG_AUFS_HNOTIFY is not set
+# CONFIG_AUFS_EXPORT is not set
+# CONFIG_AUFS_XATTR is not set
+# CONFIG_AUFS_FHSM is not set
+# CONFIG_AUFS_RDU is not set
+# CONFIG_AUFS_DIRREN is not set
+# CONFIG_AUFS_SHWH is not set
+# CONFIG_AUFS_BR_RAMFS is not set
+# CONFIG_AUFS_BR_FUSE is not set
+CONFIG_AUFS_BR_HFSPLUS=y
+CONFIG_AUFS_BDEV_LOOP=y
+# CONFIG_AUFS_DEBUG is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=m
 CONFIG_NFS_V2=m
@@ -7335,7 +7351,7 @@ CONFIG_CRYPTO_CRYPTD=y
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_SIMD=m
-CONFIG_CRYPTO_ENGINE=m
+CONFIG_CRYPTO_ENGINE=y
 
 #
 # Public-key cryptography
@@ -7488,14 +7504,14 @@ CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LIB_SHA256=y
 CONFIG_CRYPTO_HW=y
 CONFIG_CRYPTO_DEV_ALLWINNER=y
-CONFIG_CRYPTO_DEV_SUN4I_SS=m
-# CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG is not set
-CONFIG_CRYPTO_DEV_SUN8I_CE=m
+CONFIG_CRYPTO_DEV_SUN4I_SS=y
+CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE=y
 # CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_HASH is not set
+CONFIG_CRYPTO_DEV_SUN8I_CE_HASH=y
 CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG=y
 CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG=y
-CONFIG_CRYPTO_DEV_SUN8I_SS=m
+CONFIG_CRYPTO_DEV_SUN8I_SS=y
 # CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG is not set
 CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG=y
 CONFIG_CRYPTO_DEV_SUN8I_SS_HASH=y
diff --git a/config/kernel/linux-sunxi64-current.config b/config/kernel/linux-sunxi64-current.config
index 66cd23d2f..86d14f7d9 100644
--- a/config/kernel/linux-sunxi64-current.config
+++ b/config/kernel/linux-sunxi64-current.config
@@ -2504,9 +2504,11 @@ CONFIG_ATH9K_RFKILL=y
 CONFIG_ATH9K_PCOEM=y
 CONFIG_ATH9K_HTC=m
 # CONFIG_ATH9K_HTC_DEBUGFS is not set
+CONFIG_ATH9K_HWRNG=y
 CONFIG_CARL9170=m
 CONFIG_CARL9170_LEDS=y
 CONFIG_CARL9170_WPC=y
+CONFIG_CARL9170_HWRNG=y
 # CONFIG_ATH6KL is not set
 CONFIG_AR5523=m
 CONFIG_ATH10K=m
@@ -2533,9 +2535,11 @@ CONFIG_B43_PHY_N=y
 CONFIG_B43_PHY_LP=y
 CONFIG_B43_PHY_HT=y
 CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
 # CONFIG_B43_DEBUG is not set
 CONFIG_B43LEGACY=m
 CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
 CONFIG_B43LEGACY_DEBUG=y
 CONFIG_B43LEGACY_DMA=y
 CONFIG_B43LEGACY_PIO=y
@@ -2972,10 +2976,16 @@ CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_IPMB_DEVICE_INTERFACE is not set
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HW_RANDOM_BA431=m
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_HW_RANDOM_CCTRNG=m
+CONFIG_HW_RANDOM_XIPHERA=m
 CONFIG_DEVMEM=y
 # CONFIG_RAW_DRIVER is not set
 CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
 CONFIG_TCG_TIS_CORE=m
 CONFIG_TCG_TIS=m
 CONFIG_TCG_TIS_SPI=m
@@ -5006,6 +5016,7 @@ CONFIG_HID_THINGM=m
 CONFIG_HID_THRUSTMASTER=m
 CONFIG_THRUSTMASTER_FF=y
 # CONFIG_HID_UDRAW_PS3 is not set
+CONFIG_HID_U2FZERO=m
 CONFIG_HID_WACOM=m
 CONFIG_HID_WIIMOTE=m
 CONFIG_HID_XINMO=m
@@ -5252,6 +5263,7 @@ CONFIG_USB_HUB_USB251XB=m
 # CONFIG_USB_HSIC_USB3503 is not set
 # CONFIG_USB_HSIC_USB4604 is not set
 # CONFIG_USB_LINK_LAYER_TEST is not set
+CONFIG_USB_CHAOSKEY=m
 
 #
 # USB Physical Layer drivers
@@ -7175,7 +7187,7 @@ CONFIG_CRYPTO_CRYPTD=y
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_SIMD=y
-CONFIG_CRYPTO_ENGINE=m
+CONFIG_CRYPTO_ENGINE=y
 
 #
 # Public-key cryptography
@@ -7327,17 +7339,17 @@ CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LIB_SHA256=y
 CONFIG_CRYPTO_HW=y
 CONFIG_CRYPTO_DEV_ALLWINNER=y
-CONFIG_CRYPTO_DEV_SUN4I_SS=m
-# CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG is not set
-CONFIG_CRYPTO_DEV_SUN8I_CE=m
+CONFIG_CRYPTO_DEV_SUN4I_SS=y
+CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE=y
 # CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_HASH is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG is not set
-CONFIG_CRYPTO_DEV_SUN8I_SS=m
+CONFIG_CRYPTO_DEV_SUN8I_CE_HASH=y
+CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_SS=y
 # CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_SS_HASH is not set
+CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_SS_HASH=y
 # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
 # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
 # CONFIG_CRYPTO_DEV_CCP is not set
diff --git a/config/kernel/linux-sunxi64-dev.config b/config/kernel/linux-sunxi64-dev.config
index 049121681..7d7113172 100644
--- a/config/kernel/linux-sunxi64-dev.config
+++ b/config/kernel/linux-sunxi64-dev.config
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/arm64 5.10.3 Kernel Configuration
+# Linux/arm64 5.10.13 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="aarch64-linux-gnu-gcc (GNU Toolchain for the A-profile Architecture 8.3-2019.03 (arm-rel-8.36)) 8.3.0"
 CONFIG_CC_IS_GCC=y
@@ -1850,6 +1850,7 @@ CONFIG_REGMAP_SPMI=m
 CONFIG_REGMAP_W1=m
 CONFIG_REGMAP_MMIO=y
 CONFIG_REGMAP_IRQ=y
+CONFIG_REGMAP_SCCB=m
 CONFIG_DMA_SHARED_BUFFER=y
 # CONFIG_DMA_FENCE_TRACE is not set
 CONFIG_GENERIC_ARCH_TOPOLOGY=y
@@ -2503,9 +2504,11 @@ CONFIG_ATH9K_RFKILL=y
 CONFIG_ATH9K_PCOEM=y
 CONFIG_ATH9K_HTC=m
 # CONFIG_ATH9K_HTC_DEBUGFS is not set
+CONFIG_ATH9K_HWRNG=y
 CONFIG_CARL9170=m
 CONFIG_CARL9170_LEDS=y
 CONFIG_CARL9170_WPC=y
+CONFIG_CARL9170_HWRNG=y
 # CONFIG_ATH6KL is not set
 CONFIG_AR5523=m
 CONFIG_ATH10K=m
@@ -2532,9 +2535,11 @@ CONFIG_B43_PHY_N=y
 CONFIG_B43_PHY_LP=y
 CONFIG_B43_PHY_HT=y
 CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
 # CONFIG_B43_DEBUG is not set
 CONFIG_B43LEGACY=m
 CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
 CONFIG_B43LEGACY_DEBUG=y
 CONFIG_B43LEGACY_DMA=y
 CONFIG_B43LEGACY_PIO=y
@@ -2608,7 +2613,6 @@ CONFIG_RTW88=m
 CONFIG_RTL8723DU=m
 CONFIG_RTL8723DS=m
 CONFIG_RTL8822BU=m
-CONFIG_RTL8188EU=m
 CONFIG_RTL8821CU=m
 # CONFIG_WLAN_VENDOR_XRADIO is not set
 CONFIG_88XXAU=m
@@ -2972,10 +2976,16 @@ CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_IPMB_DEVICE_INTERFACE is not set
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HW_RANDOM_BA431=m
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_HW_RANDOM_CCTRNG=m
+CONFIG_HW_RANDOM_XIPHERA=m
 CONFIG_DEVMEM=y
 # CONFIG_RAW_DRIVER is not set
 CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
 CONFIG_TCG_TIS_CORE=m
 CONFIG_TCG_TIS=m
 CONFIG_TCG_TIS_SPI=m
@@ -3672,7 +3682,6 @@ CONFIG_MFD_MAX77650=m
 # CONFIG_MFD_RN5T618 is not set
 CONFIG_MFD_SEC_CORE=y
 # CONFIG_MFD_SI476X_CORE is not set
-# CONFIG_MFD_SL28CPLD is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_SKY81452 is not set
 # CONFIG_ABX500_CORE is not set
@@ -4115,6 +4124,8 @@ CONFIG_VIDEO_CX25840=m
 #
 # Camera sensor devices
 #
+CONFIG_VIDEO_APTINA_PLL=m
+CONFIG_VIDEO_SMIAPP_PLL=m
 CONFIG_VIDEO_HI556=m
 CONFIG_VIDEO_IMX214=m
 CONFIG_VIDEO_IMX219=m
@@ -4711,7 +4722,6 @@ CONFIG_SND_SOC_FSL_MICFIL=m
 
 # CONFIG_SND_I2S_HI6210_I2S is not set
 # CONFIG_SND_SOC_IMG is not set
-# CONFIG_SND_SOC_INTEL_KEEMBAY is not set
 CONFIG_SND_SOC_MTK_BTCVSD=m
 # CONFIG_SND_SOC_SOF_TOPLEVEL is not set
 
@@ -5006,6 +5016,7 @@ CONFIG_HID_THINGM=m
 CONFIG_HID_THRUSTMASTER=m
 CONFIG_THRUSTMASTER_FF=y
 # CONFIG_HID_UDRAW_PS3 is not set
+CONFIG_HID_U2FZERO=m
 CONFIG_HID_WACOM=m
 CONFIG_HID_WIIMOTE=m
 CONFIG_HID_XINMO=m
@@ -5252,6 +5263,7 @@ CONFIG_USB_HUB_USB251XB=m
 # CONFIG_USB_HSIC_USB3503 is not set
 # CONFIG_USB_HSIC_USB4604 is not set
 # CONFIG_USB_LINK_LAYER_TEST is not set
+CONFIG_USB_CHAOSKEY=m
 
 #
 # USB Physical Layer drivers
@@ -7175,7 +7187,7 @@ CONFIG_CRYPTO_CRYPTD=y
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_SIMD=y
-CONFIG_CRYPTO_ENGINE=m
+CONFIG_CRYPTO_ENGINE=y
 
 #
 # Public-key cryptography
@@ -7284,7 +7296,7 @@ CONFIG_CRYPTO_LZO=y
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ZSTD=m
+CONFIG_CRYPTO_ZSTD=y
 
 #
 # Random Number Generation
@@ -7327,17 +7339,17 @@ CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LIB_SHA256=y
 CONFIG_CRYPTO_HW=y
 CONFIG_CRYPTO_DEV_ALLWINNER=y
-CONFIG_CRYPTO_DEV_SUN4I_SS=m
-# CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG is not set
-CONFIG_CRYPTO_DEV_SUN8I_CE=m
+CONFIG_CRYPTO_DEV_SUN4I_SS=y
+CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE=y
 # CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_HASH is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG is not set
-CONFIG_CRYPTO_DEV_SUN8I_SS=m
+CONFIG_CRYPTO_DEV_SUN8I_CE_HASH=y
+CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_SS=y
 # CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_SS_HASH is not set
+CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_SS_HASH=y
 # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
 # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
 # CONFIG_CRYPTO_DEV_CCP is not set

From d2fe129f3ae9a2cbed125a56db7a399412114a34 Mon Sep 17 00:00:00 2001
From: Igor Pecovnik
Date: Thu, 11 Feb 2021 17:23:41 +0100
Subject: [PATCH 5/7] Update kernel config, add upstream patches

---
 config/kernel/linux-imx6-current.config  |   18 +-
 .../sunxi-current/patch-5.10.13-14.patch | 1728 +++++++
 .../sunxi-current/patch-5.10.14-15.patch | 4339 +++++++++++++++++
 3 files changed, 6076 insertions(+), 9 deletions(-)
 create mode 100644 patch/kernel/sunxi-current/patch-5.10.13-14.patch
 create mode 100644 patch/kernel/sunxi-current/patch-5.10.14-15.patch

diff --git a/config/kernel/linux-imx6-current.config b/config/kernel/linux-imx6-current.config
index 78b99821a..ef29ccf9d 100644
--- a/config/kernel/linux-imx6-current.config
+++ b/config/kernel/linux-imx6-current.config
@@ -1,7 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-
-# Linux/arm 5.10.6 Kernel Configuration
+# Linux/arm 5.10.15 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="arm-linux-gnueabihf-gcc (GNU Toolchain for the A-profile Architecture 8.3-2019.03 (arm-rel-8.36)) 8.3.0"
 CONFIG_CC_IS_GCC=y
@@ -10249,14 +10248,14 @@ CONFIG_CRYPTO_DEV_ALLWINNER=y
 CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG=y
 CONFIG_CRYPTO_DEV_SUN8I_CE=m
-# CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_HASH is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG is not set
+CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE_HASH=y
+CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG=y
 CONFIG_CRYPTO_DEV_SUN8I_SS=m
-# CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG is not set
-# CONFIG_CRYPTO_DEV_SUN8I_SS_HASH is not set
+CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG=y
+CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG=y
+CONFIG_CRYPTO_DEV_SUN8I_SS_HASH=y
 CONFIG_CRYPTO_DEV_HIFN_795X=m
 CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
 CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON=m
@@ -10664,6 +10663,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
 # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_KPROBE_EVENTS=y
+# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set
 CONFIG_UPROBE_EVENTS=y
 CONFIG_BPF_EVENTS=y
 CONFIG_DYNAMIC_EVENTS=y
diff --git a/patch/kernel/sunxi-current/patch-5.10.13-14.patch b/patch/kernel/sunxi-current/patch-5.10.13-14.patch
new file mode 100644
index 000000000..0533261bf
--- /dev/null
+++ b/patch/kernel/sunxi-current/patch-5.10.13-14.patch
@@ -0,0 +1,1728 @@
+diff --git a/Makefile b/Makefile
+index a2d5e953ea40a..bb3770be9779d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 65e4482e38498..02692fbe2db5c 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -743,6
+743,7 @@ config SWP_EMULATE + config CPU_BIG_ENDIAN + bool "Build big-endian kernel" + depends on ARCH_SUPPORTS_BIG_ENDIAN ++ depends on !LD_IS_LLD + help + Say Y if you plan on running a kernel in big-endian mode. + Note that your board must be properly built and your board +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi +index 9b8548e5f6e51..ee8fcae9f9f00 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi +@@ -135,3 +135,7 @@ + }; + }; + }; ++ ++&mali { ++ dma-coherent; ++}; +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h +index cd61239bae8c2..75c8e9a350cc7 100644 +--- a/arch/arm64/include/asm/memory.h ++++ b/arch/arm64/include/asm/memory.h +@@ -238,11 +238,11 @@ static inline const void *__tag_set(const void *addr, u8 tag) + + + /* +- * The linear kernel range starts at the bottom of the virtual address +- * space. Testing the top bit for the start of the region is a +- * sufficient check and avoids having to worry about the tag. ++ * Check whether an arbitrary address is within the linear map, which ++ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the ++ * kernel's TTBR1 address range. + */ +-#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) ++#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET)) + + #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) + #define __kimg_to_phys(addr) ((addr) - kimage_voffset) +@@ -323,7 +323,7 @@ static inline void *phys_to_virt(phys_addr_t x) + #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ + + #define virt_addr_valid(addr) ({ \ +- __typeof__(addr) __addr = addr; \ ++ __typeof__(addr) __addr = __tag_reset(addr); \ + __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \ + }) + +diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c +index 67a9ba9eaa96b..cde44c13dda1b 100644 +--- a/arch/arm64/mm/physaddr.c ++++ b/arch/arm64/mm/physaddr.c +@@ -9,7 +9,7 @@ + + phys_addr_t __virt_to_phys(unsigned long x) + { +- WARN(!__is_lm_address(x), ++ WARN(!__is_lm_address(__tag_reset(x)), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + (void *)x, + (void *)x); +diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h +index 5e658ba2654a7..9abe842dbd843 100644 +--- a/arch/x86/include/asm/intel-family.h ++++ b/arch/x86/include/asm/intel-family.h +@@ -97,6 +97,7 @@ + + #define INTEL_FAM6_LAKEFIELD 0x8A + #define INTEL_FAM6_ALDERLAKE 0x97 ++#define INTEL_FAM6_ALDERLAKE_L 0x9A + + /* "Small Core" Processors (Atom) */ + +diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h +index 0b4920a7238e3..e16cccdd04207 100644 +--- a/arch/x86/include/asm/msr.h ++++ b/arch/x86/include/asm/msr.h +@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} + * think of extending them - you will be slapped with a stinking trout or a frozen + * shark will reach you, wherever you are! You've been warned. 
+ */ +-static inline unsigned long long notrace __rdmsr(unsigned int msr) ++static __always_inline unsigned long long __rdmsr(unsigned int msr) + { + DECLARE_ARGS(val, low, high); + +@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr) + return EAX_EDX_VAL(val, low, high); + } + +-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high) ++static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high) + { + asm volatile("1: wrmsr\n" + "2:\n" +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index 098015b739993..84f581c91db45 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -665,6 +665,17 @@ static void __init trim_platform_memory_ranges(void) + + static void __init trim_bios_range(void) + { ++ /* ++ * A special case is the first 4Kb of memory; ++ * This is a BIOS owned area, not kernel ram, but generally ++ * not listed as such in the E820 table. ++ * ++ * This typically reserves additional memory (64KiB by default) ++ * since some BIOSes are known to corrupt low memory. See the ++ * Kconfig help text for X86_RESERVE_LOW. ++ */ ++ e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED); ++ + /* + * special case: Some BIOSes report the PC BIOS + * area (640Kb -> 1Mb) as RAM even though it is not. +@@ -722,15 +733,6 @@ early_param("reservelow", parse_reservelow); + + static void __init trim_low_memory_range(void) + { +- /* +- * A special case is the first 4Kb of memory; +- * This is a BIOS owned area, not kernel ram, but generally +- * not listed as such in the E820 table. +- * +- * This typically reserves additional memory (64KiB by default) +- * since some BIOSes are known to corrupt low memory. See the +- * Kconfig help text for X86_RESERVE_LOW. 
+- */ + memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE)); + } + +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +index b0e9b0509568c..95d883482227e 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +@@ -239,6 +239,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, + struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; + bool force_reset = false; + bool update_uclk = false; ++ bool p_state_change_support; + + if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present) + return; +@@ -279,8 +280,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, + clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz; + + clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; +- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { +- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; ++ p_state_change_support = new_clocks->p_state_change_support || (display_count == 0); ++ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { ++ clk_mgr_base->clks.p_state_change_support = p_state_change_support; + + /* to disable P-State switching, set UCLK min = max */ + if (!clk_mgr_base->clks.p_state_change_support) +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +index 98464886341f6..17e6fd8201395 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +@@ -2375,6 +2375,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting + initial_link_setting; + uint32_t link_bw; + ++ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) ++ return false; ++ + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. 
could support the b/w requested by the timing +@@ -3020,14 +3023,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && +- pipe_ctx->stream->link == link) ++ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) + core_link_disable_stream(pipe_ctx); + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && +- pipe_ctx->stream->link == link) ++ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) + core_link_enable_stream(link->dc->current_state, pipe_ctx); + } + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +index d0f3bf953d027..0d1e7b56fb395 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +@@ -646,8 +646,13 @@ static void power_on_plane( + if (REG(DC_IP_REQUEST_CNTL)) { + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 1); +- hws->funcs.dpp_pg_control(hws, plane_id, true); +- hws->funcs.hubp_pg_control(hws, plane_id, true); ++ ++ if (hws->funcs.dpp_pg_control) ++ hws->funcs.dpp_pg_control(hws, plane_id, true); ++ ++ if (hws->funcs.hubp_pg_control) ++ hws->funcs.hubp_pg_control(hws, plane_id, true); ++ + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 0); + DC_LOG_DEBUG( +@@ -1079,8 +1084,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc, + if (REG(DC_IP_REQUEST_CNTL)) { + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 1); +- hws->funcs.dpp_pg_control(hws, dpp->inst, false); +- hws->funcs.hubp_pg_control(hws, hubp->inst, false); ++ ++ if (hws->funcs.dpp_pg_control) ++ hws->funcs.dpp_pg_control(hws, dpp->inst, false); ++ ++ if (hws->funcs.hubp_pg_control) ++ hws->funcs.hubp_pg_control(hws, hubp->inst, false); ++ + dpp->funcs->dpp_reset(dpp); + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 0); +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +index 01530e686f437..f1e9b3b06b924 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +@@ -1069,8 +1069,13 @@ static void dcn20_power_on_plane( + if (REG(DC_IP_REQUEST_CNTL)) { + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 1); +- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true); +- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true); ++ ++ if (hws->funcs.dpp_pg_control) ++ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true); ++ ++ if (hws->funcs.hubp_pg_control) ++ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true); ++ + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 0); + DC_LOG_DEBUG( +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index e73785e74cba8..20441127783ba 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -295,7 +295,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { + .num_banks = 8, + .num_chans = 4, + .vmm_page_size_bytes = 4096, +- .dram_clock_change_latency_us = 23.84, ++ .dram_clock_change_latency_us = 11.72, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 
3600, + .xfc_bus_transport_time_us = 4, +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h +index 67f9f66904be2..597cf1459b0a8 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h +@@ -88,6 +88,7 @@ struct panfrost_device { + /* pm_domains for devices with more than one. */ + struct device *pm_domain_devs[MAX_PM_DOMAINS]; + struct device_link *pm_domain_links[MAX_PM_DOMAINS]; ++ bool coherent; + + struct panfrost_features features; + const struct panfrost_compatible *comp; +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c +index 0fc084110e5ba..689be734ed200 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c +@@ -587,6 +587,8 @@ static int panfrost_probe(struct platform_device *pdev) + if (!pfdev->comp) + return -ENODEV; + ++ pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT; ++ + /* Allocate and initialze the DRM device. */ + ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev); + if (IS_ERR(ddev)) +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c +index 62d4d710a5711..57a31dd0ffed1 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.c ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c +@@ -218,6 +218,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { + */ + struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) + { ++ struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_gem_object *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); +@@ -227,6 +228,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t + INIT_LIST_HEAD(&obj->mappings.list); + mutex_init(&obj->mappings.lock); + obj->base.base.funcs = &panfrost_gem_funcs; ++ obj->base.map_cached = pfdev->coherent; + + return &obj->base.base; + } +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c +index 776448c527ea9..be8d68fb0e11e 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c +@@ -371,6 +371,7 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv) + .pgsize_bitmap = SZ_4K | SZ_2M, + .ias = FIELD_GET(0xff, pfdev->features.mmu_features), + .oas = FIELD_GET(0xff00, pfdev->features.mmu_features), ++ .coherent_walk = pfdev->coherent, + .tlb = &mmu_tlb_ops, + .iommu_dev = pfdev->dev, + }; +diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c +index 0727383f49402..8b113ae32dc71 100644 +--- a/drivers/i2c/busses/i2c-tegra.c ++++ b/drivers/i2c/busses/i2c-tegra.c +@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg) + /* read back register to make sure that register writes completed */ + if (reg != I2C_TX_FIFO) + readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); ++ else if (i2c_dev->is_vi) ++ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS)); + } + + static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg) +@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data, + writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); + } + ++static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data, ++ unsigned int reg, unsigned int len) ++{ ++ u32 *data32 = data; ++ ++ /* ++ * VI I2C controller has known hardware bug where 
writes get stuck ++ * when immediate multiple writes happen to TX_FIFO register. ++ * Recommended software work around is to read I2C register after ++ * each write to TX_FIFO register to flush out the data. ++ */ ++ while (len--) ++ i2c_writel(i2c_dev, *data32++, reg); ++} ++ + static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data, + unsigned int reg, unsigned int len) + { +@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) + i2c_dev->msg_buf_remaining = buf_remaining; + i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD; + +- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); ++ if (i2c_dev->is_vi) ++ i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); ++ else ++ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); + + buf += words_to_transfer * BYTES_PER_FIFO_WORD; + } +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index 151243fa01ba5..7e3db4c0324d3 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -3350,6 +3350,11 @@ static int __init init_dmars(void) + + if (!ecap_pass_through(iommu->ecap)) + hw_pass_through = 0; ++ ++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { ++ pr_warn("Disable batched IOTLB flush due to virtualization"); ++ intel_iommu_strict = 1; ++ } + intel_svm_check(iommu); + } + +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c +index a7a9bc08dcd11..bcfbd0e44a4a0 100644 +--- a/drivers/iommu/io-pgtable-arm.c ++++ b/drivers/iommu/io-pgtable-arm.c +@@ -417,7 +417,13 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, + << ARM_LPAE_PTE_ATTRINDX_SHIFT); + } + +- if (prot & IOMMU_CACHE) ++ /* ++ * Also Mali has its own notions of shareability wherein its Inner ++ * domain covers the cores within the GPU, and its Outer domain is ++ * "outside the GPU" (i.e. either the Inner or System domain in CPU ++ * terms, depending on coherency). ++ */ ++ if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE) + pte |= ARM_LPAE_PTE_SH_IS; + else + pte |= ARM_LPAE_PTE_SH_OS; +@@ -1021,6 +1027,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) + cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) | + ARM_MALI_LPAE_TTBR_READ_INNER | + ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; ++ if (cfg->coherent_walk) ++ cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER; ++ + return &data->iop; + + out_free_data: +diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c +index 09c328ee65da8..71b3a4d5adc65 100644 +--- a/drivers/misc/habanalabs/common/device.c ++++ b/drivers/misc/habanalabs/common/device.c +@@ -1425,6 +1425,15 @@ void hl_device_fini(struct hl_device *hdev) + } + } + ++ /* Disable PCI access from device F/W so it won't send us additional ++ * interrupts. We disable MSI/MSI-X at the halt_engines function and we ++ * can't have the F/W sending us interrupts after that. We need to ++ * disable the access here because if the device is marked disable, the ++ * message won't be send. 
Also, in case of heartbeat, the device CPU is ++ * marked as disable so this message won't be sent ++ */ ++ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS); ++ + /* Mark device as disabled */ + hdev->disabled = true; + +diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c +index cd41c7ceb0e78..13c6eebd4fa63 100644 +--- a/drivers/misc/habanalabs/common/firmware_if.c ++++ b/drivers/misc/habanalabs/common/firmware_if.c +@@ -385,6 +385,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, + } + counters->rx_throughput = result; + ++ memset(&pkt, 0, sizeof(pkt)); ++ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET << ++ CPUCP_PKT_CTL_OPCODE_SHIFT); ++ + /* Fetch PCI tx counter */ + pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), +@@ -397,6 +401,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, + counters->tx_throughput = result; + + /* Fetch PCI replay counter */ ++ memset(&pkt, 0, sizeof(pkt)); + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + +diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c +index 07317ea491295..35401148969f5 100644 +--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c ++++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c +@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) + + hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, + &hw_idle.busy_engines_mask_ext, NULL); ++ hw_idle.busy_engines_mask = ++ lower_32_bits(hw_idle.busy_engines_mask_ext); + + return copy_to_user(out, &hw_idle, + min((size_t) max_size, sizeof(hw_idle))) ? 
-EFAULT : 0; +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c +index ed1bd41262ecd..68f661aca3ff2 100644 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c +@@ -3119,7 +3119,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | + VM_DONTCOPY | VM_NORESERVE; + +- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size); ++ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, ++ (dma_addr - HOST_PHYS_BASE), size); + if (rc) + dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); + +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c +index 235d47b2420f5..986ed3c072088 100644 +--- a/drivers/misc/habanalabs/goya/goya.c ++++ b/drivers/misc/habanalabs/goya/goya.c +@@ -2675,7 +2675,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | + VM_DONTCOPY | VM_NORESERVE; + +- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size); ++ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, ++ (dma_addr - HOST_PHYS_BASE), size); + if (rc) + dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); + +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index 1e9a0adda2d69..445226720ff29 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -509,15 +509,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) + /* Find our integrated MDIO bus node */ + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); + priv->master_mii_bus = of_mdio_find_bus(dn); +- if (!priv->master_mii_bus) ++ if (!priv->master_mii_bus) { ++ of_node_put(dn); + return -EPROBE_DEFER; ++ } + + get_device(&priv->master_mii_bus->dev); + priv->master_mii_dn = dn; + + priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); +- if (!priv->slave_mii_bus) ++ if (!priv->slave_mii_bus) { ++ of_node_put(dn); + return -ENOMEM; ++ } + + priv->slave_mii_bus->priv = priv; + priv->slave_mii_bus->name = "sf2 slave mii"; +diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c +index 0ef854911f215..d4a64dbde3157 100644 +--- a/drivers/net/dsa/microchip/ksz_common.c ++++ b/drivers/net/dsa/microchip/ksz_common.c +@@ -400,7 +400,7 @@ int ksz_switch_register(struct ksz_device *dev, + gpiod_set_value_cansleep(dev->reset_gpio, 1); + usleep_range(10000, 12000); + gpiod_set_value_cansleep(dev->reset_gpio, 0); +- usleep_range(100, 1000); ++ msleep(100); + } + + mutex_init(&dev->dev_mutex); +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 04f24c66cf366..55c28fbc5f9ea 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -2165,9 +2165,9 @@ static int fec_enet_mii_init(struct platform_device *pdev) + fep->mii_bus->parent = &pdev->dev; + + err = of_mdiobus_register(fep->mii_bus, node); +- of_node_put(node); + if (err) + goto err_out_free_mdiobus; ++ of_node_put(node); + + mii_cnt++; + +@@ -2180,6 +2180,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) + err_out_free_mdiobus: + mdiobus_free(fep->mii_bus); + err_out: ++ of_node_put(node); + return err; + } + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index e2540cc00d34e..627ce1a20473a 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ 
b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -4979,6 +4979,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t) + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvnic_next_crq(adapter)) != NULL) { ++ /* This barrier makes sure ibmvnic_next_crq()'s ++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded ++ * before ibmvnic_handle_crq()'s ++ * switch(gen_crq->first) and switch(gen_crq->cmd). ++ */ ++ dma_rmb(); + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +index d2581090f9a40..df238e46e2aeb 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +@@ -473,10 +473,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool) + dma_addr_t iova; + u8 *buf; + +- buf = napi_alloc_frag(pool->rbsize); ++ buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN); + if (unlikely(!buf)) + return -ENOMEM; + ++ buf = PTR_ALIGN(buf, OTX2_ALIGN); + iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(pfvf->dev, iova))) { +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +index c6c5826aba41e..1892cea05ee7c 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) + + static const + struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = { ++ .is_static = true, + .can_handle = mlxsw_sp1_span_cpu_can_handle, + .parms_set = mlxsw_sp1_span_entry_cpu_parms, + .configure = mlxsw_sp1_span_entry_cpu_configure, +@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry) + + static const + struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = { ++ .is_static = true, + .can_handle = mlxsw_sp_port_dev_check, + .parms_set = mlxsw_sp_span_entry_phys_parms, + .configure = mlxsw_sp_span_entry_phys_configure, +@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) + + static const + struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = { ++ .is_static = true, + .can_handle = mlxsw_sp2_span_cpu_can_handle, + .parms_set = mlxsw_sp2_span_entry_cpu_parms, + .configure = mlxsw_sp2_span_entry_cpu_configure, +@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work) + if (!refcount_read(&curr->ref_count)) + continue; + ++ if (curr->ops->is_static) ++ continue; ++ + err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms); + if (err) + continue; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +index d907718bc8c58..aa1cd409c0e2e 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry { + }; + + struct mlxsw_sp_span_entry_ops { ++ bool is_static; + bool (*can_handle)(const struct net_device *to_dev); + int (*parms_set)(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +index 
82b1c7a5a7a94..ba0e4d2b256a4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+@@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
+ 			if (ret) {
+ 				dev_err(&pdev->dev,
+ 					"Failed to set tx_clk\n");
+-				return ret;
++				goto err_remove_config_dt;
+ 			}
+ 		}
+ 	}
+@@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
+ 			if (ret) {
+ 				dev_err(&pdev->dev,
+ 					"Failed to set clk_ptp_ref\n");
+-				return ret;
++				goto err_remove_config_dt;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 9a6a519426a08..103d2448e9e0d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
+ 				struct plat_stmmacenet_data *plat)
+ {
+ 	plat->bus_id = 2;
++	plat->addr64 = 32;
+ 	return ehl_common_data(pdev, plat);
+ }
+
+@@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
+ 				struct plat_stmmacenet_data *plat)
+ {
+ 	plat->bus_id = 3;
++	plat->addr64 = 32;
+ 	return ehl_common_data(pdev, plat);
+ }
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 34cb59b2fcd67..4ec5f05dabe1d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1489,8 +1489,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+ 	}
+
+ 	length = (io.nblocks + 1) << ns->lba_shift;
+-	meta_len = (io.nblocks + 1) * ns->ms;
+-	metadata = nvme_to_user_ptr(io.metadata);
++
++	if ((io.control & NVME_RW_PRINFO_PRACT) &&
++	    ns->ms == sizeof(struct t10_pi_tuple)) {
++		/*
++		 * Protection information is stripped/inserted by the
++		 * controller.
++		 */
++		if (nvme_to_user_ptr(io.metadata))
++			return -EINVAL;
++		meta_len = 0;
++		metadata = NULL;
++	} else {
++		meta_len = (io.nblocks + 1) * ns->ms;
++		metadata = nvme_to_user_ptr(io.metadata);
++	}
+
+ 	if (ns->features & NVME_NS_EXT_LBAS) {
+ 		length += meta_len;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 77f615568194d..a3486c1c27f0c 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -23,6 +23,7 @@
+ #include <linux/t10-pi.h>
+ #include <linux/types.h>
+ #include <linux/io-64-nonatomic-lo-hi.h>
++#include <linux/io-64-nonatomic-hi-lo.h>
+ #include <linux/sed-opal.h>
+ #include <linux/pci-p2pdma.h>
+
+@@ -1825,6 +1826,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
+ 	if (dev->cmb_size)
+ 		return;
+
++	if (NVME_CAP_CMBS(dev->ctrl.cap))
++		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
++
+ 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
+ 	if (!dev->cmbsz)
+ 		return;
+@@ -1838,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
+ 	if (offset > bar_size)
+ 		return;
+
++	/*
++	 * Tell the controller about the host side address mapping the CMB,
++	 * and enable CMB decoding for the NVMe 1.4+ scheme:
++	 */
++	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
++		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
++			     (pci_bus_address(pdev, bar) + offset),
++			     dev->bar + NVME_REG_CMBMSC);
++	}
++
+ 	/*
+ 	 * Controllers may support a CMB size larger than their BAR,
+ 	 * for example, due to being behind a bridge.
Reduce the CMB to +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c +index 65e3d0ef36e1a..493ed7ba86ed2 100644 +--- a/drivers/nvme/host/rdma.c ++++ b/drivers/nvme/host/rdma.c +@@ -97,6 +97,7 @@ struct nvme_rdma_queue { + struct completion cm_done; + bool pi_support; + int cq_size; ++ struct mutex queue_lock; + }; + + struct nvme_rdma_ctrl { +@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, + int ret; + + queue = &ctrl->queues[idx]; ++ mutex_init(&queue->queue_lock); + queue->ctrl = ctrl; + if (idx && ctrl->ctrl.max_integrity_segments) + queue->pi_support = true; +@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, + if (IS_ERR(queue->cm_id)) { + dev_info(ctrl->ctrl.device, + "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); +- return PTR_ERR(queue->cm_id); ++ ret = PTR_ERR(queue->cm_id); ++ goto out_destroy_mutex; + } + + if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) +@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, + out_destroy_cm_id: + rdma_destroy_id(queue->cm_id); + nvme_rdma_destroy_queue_ib(queue); ++out_destroy_mutex: ++ mutex_destroy(&queue->queue_lock); + return ret; + } + +@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) + + static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) + { +- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) +- return; +- __nvme_rdma_stop_queue(queue); ++ mutex_lock(&queue->queue_lock); ++ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) ++ __nvme_rdma_stop_queue(queue); ++ mutex_unlock(&queue->queue_lock); + } + + static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) +@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) + + nvme_rdma_destroy_queue_ib(queue); + rdma_destroy_id(queue->cm_id); ++ mutex_destroy(&queue->queue_lock); + } + + static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c +index 81db2331f6d78..6487b7897d1fb 100644 +--- a/drivers/nvme/host/tcp.c ++++ b/drivers/nvme/host/tcp.c +@@ -76,6 +76,7 @@ struct nvme_tcp_queue { + struct work_struct io_work; + int io_cpu; + ++ struct mutex queue_lock; + struct mutex send_mutex; + struct llist_head req_list; + struct list_head send_list; +@@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) + + sock_release(queue->sock); + kfree(queue->pdu); ++ mutex_destroy(&queue->queue_lock); + } + + static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) +@@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + int ret, rcv_pdu_size; + ++ mutex_init(&queue->queue_lock); + queue->ctrl = ctrl; + init_llist_head(&queue->req_list); + INIT_LIST_HEAD(&queue->send_list); +@@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, + if (ret) { + dev_err(nctrl->device, + "failed to create socket: %d\n", ret); +- return ret; ++ goto err_destroy_mutex; + } + + /* Single syn retry */ +@@ -1507,6 +1510,8 @@ err_crypto: + err_sock: + sock_release(queue->sock); + queue->sock = NULL; ++err_destroy_mutex: ++ mutex_destroy(&queue->queue_lock); + return ret; + } + +@@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + +- if 
(!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) +- return; +- __nvme_tcp_stop_queue(queue); ++ mutex_lock(&queue->queue_lock); ++ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) ++ __nvme_tcp_stop_queue(queue); ++ mutex_unlock(&queue->queue_lock); + } + + static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) +diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c +index dca34489a1dc9..92ca23bc8dbfc 100644 +--- a/drivers/nvme/target/admin-cmd.c ++++ b/drivers/nvme/target/admin-cmd.c +@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) + + /* return an all zeroed buffer if we can't find an active namespace */ + ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid); +- if (!ns) ++ if (!ns) { ++ status = NVME_SC_INVALID_NS; + goto done; ++ } + + nvmet_ns_revalidate(ns); + +@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) + id->nsattr |= (1 << 0); + nvmet_put_namespace(ns); + done: +- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); ++ if (!status) ++ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); ++ + kfree(id); + out: + nvmet_req_complete(req, status); +diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c +index 442522ba487f0..4728e2bff6620 100644 +--- a/drivers/phy/motorola/phy-cpcap-usb.c ++++ b/drivers/phy/motorola/phy-cpcap-usb.c +@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev) + generic_phy = devm_phy_create(ddata->dev, NULL, &ops); + if (IS_ERR(generic_phy)) { + error = PTR_ERR(generic_phy); +- return PTR_ERR(generic_phy); ++ goto out_reg_disable; + } + + phy_set_drvdata(generic_phy, ddata); + + phy_provider = devm_of_phy_provider_register(ddata->dev, + of_phy_simple_xlate); +- if (IS_ERR(phy_provider)) +- return PTR_ERR(phy_provider); ++ if (IS_ERR(phy_provider)) { ++ error = PTR_ERR(phy_provider); ++ goto out_reg_disable; ++ } + + error = cpcap_usb_init_optional_pins(ddata); + if (error) +- return error; ++ goto out_reg_disable; + + cpcap_usb_init_optional_gpios(ddata); + + error = cpcap_usb_init_iio(ddata); + if (error) +- return error; ++ goto out_reg_disable; + + error = cpcap_usb_init_interrupts(pdev, ddata); + if (error) +- return error; ++ goto out_reg_disable; + + usb_add_phy_dev(&ddata->phy); + atomic_set(&ddata->active, 1); + schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1)); + + return 0; ++ ++out_reg_disable: ++ regulator_disable(ddata->vusb); ++ ++ return error; + } + + static int cpcap_usb_phy_remove(struct platform_device *pdev) +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c +index 65fb3a3031470..30a9062d2b4b8 100644 +--- a/drivers/platform/x86/intel-vbtn.c ++++ b/drivers/platform/x86/intel-vbtn.c +@@ -216,6 +216,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"), ++ }, ++ }, + {} /* Array terminator */ + }; + +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index c404706379d92..69402758b99c3 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -8782,6 +8782,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { + TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */ + TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */ + 
TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */ ++ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */ + TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */ + TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */ + TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */ +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c +index 5783139d0a119..c4de932302d6b 100644 +--- a/drivers/platform/x86/touchscreen_dmi.c ++++ b/drivers/platform/x86/touchscreen_dmi.c +@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = { + .properties = digma_citi_e200_props, + }; + ++static const struct property_entry estar_beauty_hd_props[] = { ++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), ++ { } ++}; ++ ++static const struct ts_dmi_data estar_beauty_hd_data = { ++ .acpi_name = "GDIX1001:00", ++ .properties = estar_beauty_hd_props, ++}; ++ + static const struct property_entry gp_electronic_t701_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 960), + PROPERTY_ENTRY_U32("touchscreen-size-y", 640), +@@ -942,6 +952,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + }, + }, ++ { ++ /* Estar Beauty HD (MID 7316R) */ ++ .driver_data = (void *)&estar_beauty_hd_data, ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Estar"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"), ++ }, ++ }, + { + /* GP-electronic T701 */ + .driver_data = (void *)&gp_electronic_t701_data, +diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c +index a2beee6e09f06..5988c300cc82e 100644 +--- a/drivers/scsi/fnic/vnic_dev.c ++++ b/drivers/scsi/fnic/vnic_dev.c +@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) + fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + pr_err("error in devcmd2 init"); +- return -ENODEV; ++ err = -ENODEV; ++ goto err_free_wq; + } + + /* +@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) +- goto err_free_wq; ++ goto err_disable_wq; + + vdev->devcmd2->result = + (struct devcmd2_result *) vdev->devcmd2->results_ring.descs; +@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) + + err_free_desc_ring: + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); +-err_free_wq: ++err_disable_wq: + vnic_wq_disable(&vdev->devcmd2->wq); ++err_free_wq: + vnic_wq_free(&vdev->devcmd2->wq); + err_free_devcmd2: + kfree(vdev->devcmd2); +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c +index 070cf516b98fe..57c9a71fa33a7 100644 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c +@@ -2957,8 +2957,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); +- if (sdev->type == TYPE_DISK) ++ if (sdev->type == TYPE_DISK) { + sdev->allow_restart = 1; ++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); ++ } + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; + } +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index 96a2952cf626b..a50f1eef0e0cd 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -1624,8 +1624,13 
@@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + rc = fc_exch_done_locked(ep); + WARN_ON(fc_seq_exch(sp) != ep); + spin_unlock_bh(&ep->ex_lock); +- if (!rc) ++ if (!rc) { + fc_exch_delete(ep); ++ } else { ++ FC_EXCH_DBG(ep, "ep is completed already," ++ "hence skip calling the resp\n"); ++ goto skip_resp; ++ } + } + + /* +@@ -1644,6 +1649,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + if (!fc_invoke_resp(ep, sp, fp)) + fc_frame_free(fp); + ++skip_resp: + fc_exch_release(ep); + return; + rel: +@@ -1900,10 +1906,16 @@ static void fc_exch_reset(struct fc_exch *ep) + + fc_exch_hold(ep); + +- if (!rc) ++ if (!rc) { + fc_exch_delete(ep); ++ } else { ++ FC_EXCH_DBG(ep, "ep is completed already," ++ "hence skip calling the resp\n"); ++ goto skip_resp; ++ } + + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); ++skip_resp: + fc_seq_set_resp(sp, NULL, ep->arg); + fc_exch_release(ep); + } +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c +index cba1cf6a1c12d..1e939a2a387f3 100644 +--- a/drivers/scsi/scsi_transport_srp.c ++++ b/drivers/scsi/scsi_transport_srp.c +@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport) + res = mutex_lock_interruptible(&rport->mutex); + if (res) + goto out; +- scsi_target_block(&shost->shost_gendev); ++ if (rport->state != SRP_RPORT_FAIL_FAST) ++ /* ++ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition ++ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() ++ * later is ok though, scsi_internal_device_unblock_nowait() ++ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK. ++ */ ++ scsi_target_block(&shost->shost_gendev); + res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; + pr_debug("%s (state %d): transport.reconnect() returned %d\n", + dev_name(&shost->shost_gendev), rport->state, res); +diff --git a/fs/udf/super.c b/fs/udf/super.c +index 5bef3a68395d8..d0df217f4712a 100644 +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb) + struct buffer_head *bh = NULL; + int nsr = 0; + struct udf_sb_info *sbi; ++ loff_t session_offset; + + sbi = UDF_SB(sb); + if (sb->s_blocksize < sizeof(struct volStructDesc)) +@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb) + else + sectorsize = sb->s_blocksize; + +- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits); ++ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits; ++ sector += session_offset; + + udf_debug("Starting at sector %u (%lu byte sectors)\n", + (unsigned int)(sector >> sb->s_blocksize_bits), +@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb) + + if (nsr > 0) + return 1; +- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) == +- VSD_FIRST_SECTOR_OFFSET) ++ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET) + return -1; + else + return 0; +diff --git a/include/linux/kthread.h b/include/linux/kthread.h +index 65b81e0c494d2..2484ed97e72f5 100644 +--- a/include/linux/kthread.h ++++ b/include/linux/kthread.h +@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + unsigned int cpu, + const char *namefmt); + ++void kthread_set_per_cpu(struct task_struct *k, int cpu); ++bool kthread_is_per_cpu(struct task_struct *k); ++ + /** + * kthread_run - create and wake a thread. + * @threadfn: the function to run until signal_pending(current). 
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h +index d925359976873..bfed36e342ccb 100644 +--- a/include/linux/nvme.h ++++ b/include/linux/nvme.h +@@ -116,6 +116,9 @@ enum { + NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer + * Location + */ ++ NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory ++ * Space Control ++ */ + NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */ + NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */ + NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */ +@@ -135,6 +138,7 @@ enum { + #define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff) + #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) + #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) ++#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1) + + #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) + #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) +@@ -192,6 +196,8 @@ enum { + NVME_CSTS_SHST_OCCUR = 1 << 2, + NVME_CSTS_SHST_CMPLT = 2 << 2, + NVME_CSTS_SHST_MASK = 3 << 2, ++ NVME_CMBMSC_CRE = 1 << 0, ++ NVME_CMBMSC_CMSE = 1 << 1, + }; + + struct nvme_id_power_state { +diff --git a/kernel/kthread.c b/kernel/kthread.c +index 933a625621b8d..5edf7e19ab262 100644 +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + return p; + kthread_bind(p, cpu); + /* CPU hotplug need to bind once again when unparking the thread. */ +- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); + to_kthread(p)->cpu = cpu; + return p; + } + ++void kthread_set_per_cpu(struct task_struct *k, int cpu) ++{ ++ struct kthread *kthread = to_kthread(k); ++ if (!kthread) ++ return; ++ ++ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY)); ++ ++ if (cpu < 0) { ++ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags); ++ return; ++ } ++ ++ kthread->cpu = cpu; ++ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags); ++} ++ ++bool kthread_is_per_cpu(struct task_struct *k) ++{ ++ struct kthread *kthread = to_kthread(k); ++ if (!kthread) ++ return false; ++ ++ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags); ++} ++ + /** + * kthread_unpark - unpark a thread created by kthread_create(). + * @k: thread created by kthread_create(). +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c +index 02bc5b8f1eb27..bdaf4829098c0 100644 +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -5271,12 +5271,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie + /* + * Check whether we follow the irq-flags state precisely: + */ +-static void check_flags(unsigned long flags) ++static noinstr void check_flags(unsigned long flags) + { + #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) + if (!debug_locks) + return; + ++ /* Get the warning out.. 
*/ ++ instrumentation_begin(); ++ + if (irqs_disabled_flags(flags)) { + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) { + printk("possible reason: unannotated irqs-off.\n"); +@@ -5304,6 +5307,8 @@ static void check_flags(unsigned long flags) + + if (!debug_locks) + print_irqtrace_events(current); ++ ++ instrumentation_end(); + #endif + } + +diff --git a/kernel/smpboot.c b/kernel/smpboot.c +index 2efe1e206167c..f25208e8df836 100644 +--- a/kernel/smpboot.c ++++ b/kernel/smpboot.c +@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) + kfree(td); + return PTR_ERR(tsk); + } ++ kthread_set_per_cpu(tsk, cpu); + /* + * Park the thread so that it could start right on the CPU + * when it is available. +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 0695c7895c892..1d99c52cc99a6 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -1845,12 +1845,6 @@ static void worker_attach_to_pool(struct worker *worker, + { + mutex_lock(&wq_pool_attach_mutex); + +- /* +- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any +- * online CPUs. It'll be re-applied when any of the CPUs come up. +- */ +- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); +- + /* + * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains + * stable across this function. See the comments above the flag +@@ -1859,6 +1853,9 @@ static void worker_attach_to_pool(struct worker *worker, + if (pool->flags & POOL_DISASSOCIATED) + worker->flags |= WORKER_UNBOUND; + ++ if (worker->rescue_wq) ++ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); ++ + list_add_tail(&worker->node, &pool->workers); + worker->pool = pool; + +diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c +index 90470392fdaa7..de5cd3818690c 100644 +--- a/net/mac80211/debugfs.c ++++ b/net/mac80211/debugfs.c +@@ -120,18 +120,17 @@ static ssize_t aqm_write(struct file *file, + { + struct ieee80211_local *local = file->private_data; + char buf[100]; +- size_t len; + +- if (count > sizeof(buf)) ++ if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + +- buf[sizeof(buf) - 1] = '\0'; +- len = strlen(buf); +- if (len > 0 && buf[len-1] == '\n') +- buf[len-1] = 0; ++ if (count && buf[count - 1] == '\n') ++ buf[count - 1] = '\0'; ++ else ++ buf[count] = '\0'; + + if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1) + return count; +@@ -177,18 +176,17 @@ static ssize_t airtime_flags_write(struct file *file, + { + struct ieee80211_local *local = file->private_data; + char buf[16]; +- size_t len; + +- if (count > sizeof(buf)) ++ if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + +- buf[sizeof(buf) - 1] = 0; +- len = strlen(buf); +- if (len > 0 && buf[len - 1] == '\n') +- buf[len - 1] = 0; ++ if (count && buf[count - 1] == '\n') ++ buf[count - 1] = '\0'; ++ else ++ buf[count] = '\0'; + + if (kstrtou16(buf, 0, &local->airtime_flags)) + return -EINVAL; +@@ -237,20 +235,19 @@ static ssize_t aql_txq_limit_write(struct file *file, + { + struct ieee80211_local *local = file->private_data; + char buf[100]; +- size_t len; + u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old; + struct sta_info *sta; + +- if (count > sizeof(buf)) ++ if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + +- buf[sizeof(buf) - 1] = 0; +- len = strlen(buf); +- if (len > 0 && buf[len - 1] == '\n') +- buf[len - 1] = 0; ++ if (count 
&& buf[count - 1] == '\n') ++ buf[count - 1] = '\0'; ++ else ++ buf[count] = '\0'; + + if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3) + return -EINVAL; +@@ -306,18 +303,17 @@ static ssize_t force_tx_status_write(struct file *file, + { + struct ieee80211_local *local = file->private_data; + char buf[3]; +- size_t len; + +- if (count > sizeof(buf)) ++ if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + +- buf[sizeof(buf) - 1] = '\0'; +- len = strlen(buf); +- if (len > 0 && buf[len - 1] == '\n') +- buf[len - 1] = 0; ++ if (count && buf[count - 1] == '\n') ++ buf[count - 1] = '\0'; ++ else ++ buf[count] = '\0'; + + if (buf[0] == '0' && buf[1] == '\0') + local->force_tx_status = 0; +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 2a5a11f92b03e..98517423b0b76 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -4191,6 +4191,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta) + + rcu_read_lock(); + key = rcu_dereference(sta->ptk[sta->ptk_idx]); ++ if (!key) ++ key = rcu_dereference(sdata->default_unicast_key); + if (key) { + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index ca1e9de388910..88868bf300513 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -4278,7 +4278,6 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb, + struct ethhdr *ehdr = (struct ethhdr *)skb->data; + struct ieee80211_key *key; + struct sta_info *sta; +- bool offload = true; + + if (unlikely(skb->len < ETH_HLEN)) { + kfree_skb(skb); +@@ -4294,18 +4293,22 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb, + + if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || +- sdata->control_port_protocol == ehdr->h_proto)) +- offload = false; +- else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) && +- (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) || +- key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)) +- offload = false; +- +- if (offload) +- ieee80211_8023_xmit(sdata, dev, sta, key, skb); +- else +- ieee80211_subif_start_xmit(skb, dev); ++ sdata->control_port_protocol == ehdr->h_proto)) ++ goto skip_offload; ++ ++ key = rcu_dereference(sta->ptk[sta->ptk_idx]); ++ if (!key) ++ key = rcu_dereference(sdata->default_unicast_key); ++ ++ if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) || ++ key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)) ++ goto skip_offload; ++ ++ ieee80211_8023_xmit(sdata, dev, sta, key, skb); ++ goto out; + ++skip_offload: ++ ieee80211_subif_start_xmit(skb, dev); + out: + rcu_read_unlock(); + +diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c +index 23d8685453627..2c1ffc9ba2eb2 100644 +--- a/net/switchdev/switchdev.c ++++ b/net/switchdev/switchdev.c +@@ -460,10 +460,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev, + extack = switchdev_notifier_info_to_extack(&port_obj_info->info); + + if (check_cb(dev)) { +- /* This flag is only checked if the return value is success. */ +- port_obj_info->handled = true; +- return add_cb(dev, port_obj_info->obj, port_obj_info->trans, +- extack); ++ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans, ++ extack); ++ if (err != -EOPNOTSUPP) ++ port_obj_info->handled = true; ++ return err; + } + + /* Switch ports might be stacked under e.g. a LAG. 
Ignore the +@@ -515,9 +516,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev, + int err = -EOPNOTSUPP; + + if (check_cb(dev)) { +- /* This flag is only checked if the return value is success. */ +- port_obj_info->handled = true; +- return del_cb(dev, port_obj_info->obj); ++ err = del_cb(dev, port_obj_info->obj); ++ if (err != -EOPNOTSUPP) ++ port_obj_info->handled = true; ++ return err; + } + + /* Switch ports might be stacked under e.g. a LAG. Ignore the +@@ -568,9 +570,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev, + int err = -EOPNOTSUPP; + + if (check_cb(dev)) { +- port_attr_info->handled = true; +- return set_cb(dev, port_attr_info->attr, +- port_attr_info->trans); ++ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans); ++ if (err != -EOPNOTSUPP) ++ port_attr_info->handled = true; ++ return err; + } + + /* Switch ports might be stacked under e.g. a LAG. Ignore the +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 770ad25f1907c..d393401db1ec5 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2484,6 +2484,9 @@ static const struct pci_device_id azx_ids[] = { + /* CometLake-S */ + { PCI_DEVICE(0x8086, 0xa3f0), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, ++ /* CometLake-R */ ++ { PCI_DEVICE(0x8086, 0xf0c8), ++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Icelake */ + { PCI_DEVICE(0x8086, 0x34c8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, +@@ -2507,6 +2510,9 @@ static const struct pci_device_id azx_ids[] = { + /* Alderlake-S */ + { PCI_DEVICE(0x8086, 0x7ad0), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, ++ /* Alderlake-P */ ++ { PCI_DEVICE(0x8086, 0x51c8), ++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Elkhart Lake */ + { PCI_DEVICE(0x8086, 0x4b55), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index d12b4799c3cb7..dc1ab4fc93a5b 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -4349,6 +4349,7 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), + HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), + HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi), + HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi), ++HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi), + HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi), + HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), + HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi), +diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c +index 8b0ddc4b8227b..8d65004c917a1 100644 +--- a/sound/soc/sof/intel/hda-codec.c ++++ b/sound/soc/sof/intel/hda-codec.c +@@ -93,8 +93,7 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev) + * has been recorded in STATESTS + */ + if (codec->jacktbl.used) +- schedule_delayed_work(&codec->jackpoll_work, +- codec->jackpoll_interval); ++ pm_request_resume(&codec->core.dev); + } + #else + void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {} +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index c6ab44543c92a..956383d5fa62e 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -2921,14 +2921,10 @@ int check(struct objtool_file *file) + warnings += ret; + + out: +- if (ret < 0) { +- /* +- * Fatal error. 
The binary is corrupt or otherwise broken in +- * some way, or objtool itself is broken. Fail the kernel +- * build. +- */ +- return ret; +- } +- ++ /* ++ * For now, don't fail the kernel build on fatal warnings. These ++ * errors are still fairly common due to the growing matrix of ++ * supported toolchains and their recent pace of change. ++ */ + return 0; + } +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c +index 9452cfb01ef19..f4f3e8d995930 100644 +--- a/tools/objtool/elf.c ++++ b/tools/objtool/elf.c +@@ -425,6 +425,13 @@ static int read_symbols(struct elf *elf) + list_add(&sym->list, entry); + elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx); + elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name)); ++ ++ /* ++ * Don't store empty STT_NOTYPE symbols in the rbtree. They ++ * can exist within a function, confusing the sorting. ++ */ ++ if (!sym->len) ++ rb_erase(&sym->node, &sym->sec->symbol_tree); + } + + if (stats) +diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c +index cd089a5058594..ead9e51f75ada 100644 +--- a/tools/power/x86/intel-speed-select/isst-config.c ++++ b/tools/power/x86/intel-speed-select/isst-config.c +@@ -1245,6 +1245,8 @@ static void dump_isst_config(int arg) + isst_ctdp_display_information_end(outf); + } + ++static void adjust_scaling_max_from_base_freq(int cpu); ++ + static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3, + void *arg4) + { +@@ -1263,6 +1265,9 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3, + int pkg_id = get_physical_package_id(cpu); + int die_id = get_physical_die_id(cpu); + ++ /* Wait for updated base frequencies */ ++ usleep(2000); ++ + fprintf(stderr, "Option is set to online/offline\n"); + ctdp_level.core_cpumask_size = + alloc_cpu_set(&ctdp_level.core_cpumask); +@@ -1279,6 +1284,7 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3, + if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) { + fprintf(stderr, "online cpu %d\n", i); + set_cpu_online_offline(i, 1); ++ adjust_scaling_max_from_base_freq(i); + } else { + fprintf(stderr, "offline cpu %d\n", i); + set_cpu_online_offline(i, 0); +@@ -1436,6 +1442,31 @@ static int set_cpufreq_scaling_min_max(int cpu, int max, int freq) + return 0; + } + ++static int no_turbo(void) ++{ ++ return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo"); ++} ++ ++static void adjust_scaling_max_from_base_freq(int cpu) ++{ ++ int base_freq, scaling_max_freq; ++ ++ scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu); ++ base_freq = get_cpufreq_base_freq(cpu); ++ if (scaling_max_freq < base_freq || no_turbo()) ++ set_cpufreq_scaling_min_max(cpu, 1, base_freq); ++} ++ ++static void adjust_scaling_min_from_base_freq(int cpu) ++{ ++ int base_freq, scaling_min_freq; ++ ++ scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu); ++ base_freq = get_cpufreq_base_freq(cpu); ++ if (scaling_min_freq < base_freq) ++ set_cpufreq_scaling_min_max(cpu, 0, base_freq); ++} ++ + static int set_clx_pbf_cpufreq_scaling_min_max(int cpu) + { + struct isst_pkg_ctdp_level_info *ctdp_level; +@@ -1533,6 +1564,7 @@ static void set_scaling_min_to_cpuinfo_max(int cpu) + continue; + + set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0); ++ adjust_scaling_min_from_base_freq(i); + } + } + +diff --git 
a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c +index cb53a8b777e68..c25cf7cd45e9f 100644 +--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c ++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c +@@ -443,7 +443,6 @@ int test_alignment_handler_integer(void) + LOAD_DFORM_TEST(ldu); + LOAD_XFORM_TEST(ldx); + LOAD_XFORM_TEST(ldux); +- LOAD_DFORM_TEST(lmw); + STORE_DFORM_TEST(stb); + STORE_XFORM_TEST(stbx); + STORE_DFORM_TEST(stbu); +@@ -462,7 +461,11 @@ int test_alignment_handler_integer(void) + STORE_XFORM_TEST(stdx); + STORE_DFORM_TEST(stdu); + STORE_XFORM_TEST(stdux); ++ ++#ifdef __BIG_ENDIAN__ ++ LOAD_DFORM_TEST(lmw); + STORE_DFORM_TEST(stmw); ++#endif + + return rc; + } diff --git a/patch/kernel/sunxi-current/patch-5.10.14-15.patch b/patch/kernel/sunxi-current/patch-5.10.14-15.patch new file mode 100644 index 000000000..38e15f951 --- /dev/null +++ b/patch/kernel/sunxi-current/patch-5.10.14-15.patch @@ -0,0 +1,4339 @@ +diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst +index 580ab9a0fe319..137afeb3f581c 100644 +--- a/Documentation/filesystems/overlayfs.rst ++++ b/Documentation/filesystems/overlayfs.rst +@@ -575,6 +575,14 @@ without significant effort. + The advantage of mounting with the "volatile" option is that all forms of + sync calls to the upper filesystem are omitted. + ++In order to avoid a giving a false sense of safety, the syncfs (and fsync) ++semantics of volatile mounts are slightly different than that of the rest of ++VFS. If any writeback error occurs on the upperdir's filesystem after a ++volatile mount takes place, all sync functions will return an error. Once this ++condition is reached, the filesystem will not recover, and every subsequent sync ++call will return an error, even if the upperdir has not experience a new error ++since the last sync call. ++ + When overlay is mounted with "volatile" option, the directory + "$workdir/work/incompat/volatile" is created. During next mount, overlay + checks for this directory and refuses to mount if present. 
This is a strong +diff --git a/Makefile b/Makefile +index bb3770be9779d..b62d2d4ea7b02 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 10 +-SUBLEVEL = 14 ++SUBLEVEL = 15 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +@@ -812,10 +812,12 @@ KBUILD_CFLAGS += -ftrivial-auto-var-init=zero + KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang + endif + ++DEBUG_CFLAGS := ++ + # Workaround for GCC versions < 5.0 + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801 + ifdef CONFIG_CC_IS_GCC +-DEBUG_CFLAGS := $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments)) ++DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments)) + endif + + ifdef CONFIG_DEBUG_INFO +@@ -948,12 +950,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) + # change __FILE__ to the relative path from the srctree + KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) + +-# ensure -fcf-protection is disabled when using retpoline as it is +-# incompatible with -mindirect-branch=thunk-extern +-ifdef CONFIG_RETPOLINE +-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +-endif +- + # include additional Makefiles when needed + include-y := scripts/Makefile.extrawarn + include-$(CONFIG_KASAN) += scripts/Makefile.kasan +diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi +index c8745bc800f71..7b8c18e6605e4 100644 +--- a/arch/arm/boot/dts/omap3-gta04.dtsi ++++ b/arch/arm/boot/dts/omap3-gta04.dtsi +@@ -114,7 +114,7 @@ + gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>; + gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>; + gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>; +- cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>; ++ cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>; + num-chipselects = <1>; + + /* lcd panel */ +@@ -124,7 +124,6 @@ + spi-max-frequency = <100000>; + spi-cpol; + spi-cpha; +- spi-cs-high; + + backlight= <&backlight>; + label = "lcd"; +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi +index 62ab23824a3e7..e4d287d994214 100644 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi +@@ -35,7 +35,7 @@ + */ + rs485-rx-en { + gpio-hog; +- gpios = <8 GPIO_ACTIVE_HIGH>; ++ gpios = <8 0>; + output-low; + line-name = "rs485-rx-en"; + }; +@@ -63,7 +63,7 @@ + */ + usb-hub { + gpio-hog; +- gpios = <2 GPIO_ACTIVE_HIGH>; ++ gpios = <2 0>; + output-high; + line-name = "usb-hub-reset"; + }; +@@ -87,6 +87,12 @@ + }; + }; + ++&i2c4 { ++ touchscreen@49 { ++ status = "disabled"; ++ }; ++}; ++ + &i2c5 { /* TP7/TP8 */ + pinctrl-names = "default"; + pinctrl-0 = <&i2c5_pins_a>; +@@ -104,7 +110,7 @@ + * are used for on-board microSD slot instead. 
+ */ + /delete-property/broken-cd; +- cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; ++ cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>; + disable-wp; + }; + +diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi +index f796a6150313e..2d027dafb7bce 100644 +--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi ++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi +@@ -353,7 +353,8 @@ + pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>; + pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>; + pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>; +- broken-cd; ++ cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; ++ disable-wp; + st,sig-dir; + st,neg-edge; + st,use-ckin; +diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S +index 98daa7f483148..7454480d084b2 100644 +--- a/arch/arm/include/debug/tegra.S ++++ b/arch/arm/include/debug/tegra.S +@@ -149,7 +149,34 @@ + + .align + 99: .word . ++#if defined(ZIMAGE) ++ .word . + 4 ++/* ++ * Storage for the state maintained by the macro. ++ * ++ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c. ++ * That's because this header is included from multiple files, and we only ++ * want a single copy of the data. In particular, the UART probing code above ++ * assumes it's running using physical addresses. This is true when this file ++ * is included from head.o, but not when included from debug.o. So we need ++ * to share the probe results between the two copies, rather than having ++ * to re-run the probing again later. ++ * ++ * In the decompressor, we put the storage right here, since common.c ++ * isn't included in the decompressor build. This storage data gets put in ++ * .text even though it's really data, since .data is discarded from the ++ * decompressor. Luckily, .text is writeable in the decompressor, unless ++ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug. ++ */ ++ /* Debug UART initialization required */ ++ .word 1 ++ /* Debug UART physical address */ ++ .word 0 ++ /* Debug UART virtual address */ ++ .word 0 ++#else + .word tegra_uart_config ++#endif + .ltorg + + /* Load previously selected UART address */ +@@ -189,30 +216,3 @@ + + .macro waituarttxrdy,rd,rx + .endm +- +-/* +- * Storage for the state maintained by the macros above. +- * +- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c. +- * That's because this header is included from multiple files, and we only +- * want a single copy of the data. In particular, the UART probing code above +- * assumes it's running using physical addresses. This is true when this file +- * is included from head.o, but not when included from debug.o. So we need +- * to share the probe results between the two copies, rather than having +- * to re-run the probing again later. +- * +- * In the decompressor, we put the symbol/storage right here, since common.c +- * isn't included in the decompressor build. This symbol gets put in .text +- * even though it's really data, since .data is discarded from the +- * decompressor. Luckily, .text is writeable in the decompressor, unless +- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug. 
+- */ +-#if defined(ZIMAGE) +-tegra_uart_config: +- /* Debug UART initialization required */ +- .word 1 +- /* Debug UART physical address */ +- .word 0 +- /* Debug UART virtual address */ +- .word 0 +-#endif +diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c +index 416462e3f5d63..f9713dc561cf7 100644 +--- a/arch/arm/mach-footbridge/dc21285.c ++++ b/arch/arm/mach-footbridge/dc21285.c +@@ -65,15 +65,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("ldrb %0, [%1, %2]" ++ asm volatile("ldrb %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 2: +- asm("ldrh %0, [%1, %2]" ++ asm volatile("ldrh %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 4: +- asm("ldr %0, [%1, %2]" ++ asm volatile("ldr %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + } +@@ -99,17 +99,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("strb %0, [%1, %2]" ++ asm volatile("strb %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 2: +- asm("strh %0, [%1, %2]" ++ asm volatile("strh %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 4: +- asm("str %0, [%1, %2]" ++ asm volatile("str %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; +diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c +index a720259099edf..0a4c9b0b13b0c 100644 +--- a/arch/arm/mach-omap1/board-osk.c ++++ b/arch/arm/mach-omap1/board-osk.c +@@ -203,6 +203,8 @@ static int osk_tps_setup(struct i2c_client *client, void *context) + */ + gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en"); + gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1); ++ /* Free the GPIO again as the driver will request it */ ++ gpio_free(OSK_TPS_GPIO_USB_PWR_EN); + + /* Set GPIO 2 high so LED D3 is off by default */ + tps65010_set_gpio_out_value(GPIO2, HIGH); +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +index 8514fe6a275a3..a6127002573bd 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +@@ -2384,7 +2384,7 @@ + interrupts = ; + dr_mode = "host"; + snps,dis_u2_susphy_quirk; +- snps,quirk-frame-length-adjustment; ++ snps,quirk-frame-length-adjustment = <0x20>; + snps,parkmode-disable-ss-quirk; + }; + }; +diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts +index cf5a98f0e47c8..a712273c905af 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts ++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts +@@ -52,7 +52,7 @@ + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + +- gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>; ++ gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>; + enable-active-high; + regulator-always-on; + }; +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +index 1fa39bacff4b3..0b4545012d43e 100644 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +@@ -385,7 +385,7 @@ + + dcfg: dcfg@1ee0000 { + compatible = "fsl,ls1046a-dcfg", "syscon"; +- reg = <0x0 0x1ee0000 0x0 0x10000>; ++ reg = <0x0 0x1ee0000 0x0 0x1000>; + big-endian; + }; + +diff --git 
a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +index 76a8c996d497f..d70aae77a6e84 100644 +--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts ++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +@@ -263,6 +263,8 @@ + &i2c3 { + status = "okay"; + clock-frequency = <400000>; ++ /* Overwrite pinctrl-0 from sdm845.dtsi */ ++ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>; + + tsel: hid@15 { + compatible = "hid-over-i2c"; +@@ -270,9 +272,6 @@ + hid-descr-addr = <0x1>; + + interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; +- +- pinctrl-names = "default"; +- pinctrl-0 = <&i2c3_hid_active>; + }; + + tsc2: hid@2c { +@@ -281,11 +280,6 @@ + hid-descr-addr = <0x20>; + + interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>; +- +- pinctrl-names = "default"; +- pinctrl-0 = <&i2c3_hid_active>; +- +- status = "disabled"; + }; + }; + +diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi +index 2695ea8cda142..64193292d26c3 100644 +--- a/arch/arm64/boot/dts/rockchip/px30.dtsi ++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi +@@ -1097,7 +1097,7 @@ + vopl_mmu: iommu@ff470f00 { + compatible = "rockchip,iommu"; + reg = <0x0 0xff470f00 0x0 0x100>; +- interrupts = ; ++ interrupts = ; + interrupt-names = "vopl_mmu"; + clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>; + clock-names = "aclk", "iface"; +diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +index 06d48338c8362..219b7507a10fb 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts ++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +@@ -790,7 +790,6 @@ + &pcie0 { + bus-scan-delay-ms = <1000>; + ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>; +- max-link-speed = <2>; + num-lanes = <4>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_clkreqn_cpm>; +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig +index 234a21d26f674..3474286e59db7 100644 +--- a/arch/riscv/Kconfig ++++ b/arch/riscv/Kconfig +@@ -252,8 +252,10 @@ choice + default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY + + config MAXPHYSMEM_1GB ++ depends on 32BIT + bool "1GiB" + config MAXPHYSMEM_2GB ++ depends on 64BIT && CMODEL_MEDLOW + bool "2GiB" + config MAXPHYSMEM_128GB + depends on 64BIT && CMODEL_MEDANY +diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c +index a6c4bb6c2c012..c17b8e5ec1869 100644 +--- a/arch/um/drivers/virtio_uml.c ++++ b/arch/um/drivers/virtio_uml.c +@@ -1083,6 +1083,7 @@ static void virtio_uml_release_dev(struct device *d) + } + + os_close_file(vu_dev->sock); ++ kfree(vu_dev); + } + + /* Platform device */ +@@ -1096,7 +1097,7 @@ static int virtio_uml_probe(struct platform_device *pdev) + if (!pdata) + return -EINVAL; + +- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL); ++ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL); + if (!vu_dev) + return -ENOMEM; + +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 1bf21746f4cea..6a7efa78eba22 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -127,6 +127,9 @@ else + + KBUILD_CFLAGS += -mno-red-zone + KBUILD_CFLAGS += -mcmodel=kernel ++ ++ # Intel CET isn't enabled in the kernel ++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) + endif + + ifdef CONFIG_X86_X32 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index 57af25cb44f63..51abd44ab8c2d 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -197,16 +197,6 @@ static 
inline bool apic_needs_pit(void) { return true; } + #endif /* !CONFIG_X86_LOCAL_APIC */ + + #ifdef CONFIG_X86_X2APIC +-/* +- * Make previous memory operations globally visible before +- * sending the IPI through x2apic wrmsr. We need a serializing instruction or +- * mfence for this. +- */ +-static inline void x2apic_wrmsr_fence(void) +-{ +- asm volatile("mfence" : : : "memory"); +-} +- + static inline void native_apic_msr_write(u32 reg, u32 v) + { + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h +index 7f828fe497978..4819d5e5a3353 100644 +--- a/arch/x86/include/asm/barrier.h ++++ b/arch/x86/include/asm/barrier.h +@@ -84,4 +84,22 @@ do { \ + + #include + ++/* ++ * Make previous memory operations globally visible before ++ * a WRMSR. ++ * ++ * MFENCE makes writes visible, but only affects load/store ++ * instructions. WRMSR is unfortunately not a load/store ++ * instruction and is unaffected by MFENCE. The LFENCE ensures ++ * that the WRMSR is not reordered. ++ * ++ * Most WRMSRs are full serializing instructions themselves and ++ * do not require this barrier. This is only required for the ++ * IA32_TSC_DEADLINE and X2APIC MSRs. ++ */ ++static inline void weak_wrmsr_fence(void) ++{ ++ asm volatile("mfence; lfence" : : : "memory"); ++} ++ + #endif /* _ASM_X86_BARRIER_H */ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 113f6ca7b8284..f4c0514fc5108 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -472,6 +473,9 @@ static int lapic_next_deadline(unsigned long delta, + { + u64 tsc; + ++ /* This MSR is special and need a special fence: */ ++ weak_wrmsr_fence(); ++ + tsc = rdtsc(); + wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); + return 0; +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c +index b0889c48a2ac5..7eec3c154fa24 100644 +--- a/arch/x86/kernel/apic/x2apic_cluster.c ++++ b/arch/x86/kernel/apic/x2apic_cluster.c +@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); + } + +@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long flags; + u32 dest; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + local_irq_save(flags); + + tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask); +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c +index e14eae6d6ea71..032a00e5d9fa6 100644 +--- a/arch/x86/kernel/apic/x2apic_phys.c ++++ b/arch/x86/kernel/apic/x2apic_phys.c +@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_apicid, cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); + } + +@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long this_cpu; + unsigned long flags; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + + 
local_irq_save(flags); + +@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which) + { + unsigned long cfg = __prepare_ICR(which, vector, 0); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + native_x2apic_icr_write(cfg, 0); + } + +diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c +index 03aa33b581658..668a4a6533d92 100644 +--- a/arch/x86/kernel/hw_breakpoint.c ++++ b/arch/x86/kernel/hw_breakpoint.c +@@ -269,6 +269,20 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end) + CPU_ENTRY_AREA_TOTAL_SIZE)) + return true; + ++ /* ++ * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU ++ * GSBASE value via __per_cpu_offset or pcpu_unit_offsets. ++ */ ++#ifdef CONFIG_SMP ++ if (within_area(addr, end, (unsigned long)__per_cpu_offset, ++ sizeof(unsigned long) * nr_cpu_ids)) ++ return true; ++#else ++ if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets, ++ sizeof(pcpu_unit_offsets))) ++ return true; ++#endif ++ + for_each_possible_cpu(cpu) { + /* The original rw GDT is being used after load_direct_gdt() */ + if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu), +@@ -293,6 +307,14 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end) + (unsigned long)&per_cpu(cpu_tlbstate, cpu), + sizeof(struct tlb_state))) + return true; ++ ++ /* ++ * When in guest (X86_FEATURE_HYPERVISOR), local_db_save() ++ * will read per-cpu cpu_dr7 before clear dr7 register. ++ */ ++ if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu), ++ sizeof(cpu_dr7))) ++ return true; + } + + return false; +@@ -491,15 +513,12 @@ static int hw_breakpoint_handler(struct die_args *args) + struct perf_event *bp; + unsigned long *dr6_p; + unsigned long dr6; ++ bool bpx; + + /* The DR6 value is pointed by args->err */ + dr6_p = (unsigned long *)ERR_PTR(args->err); + dr6 = *dr6_p; + +- /* If it's a single step, TRAP bits are random */ +- if (dr6 & DR_STEP) +- return NOTIFY_DONE; +- + /* Do an early return if no trap bits are set in DR6 */ + if ((dr6 & DR_TRAP_BITS) == 0) + return NOTIFY_DONE; +@@ -509,28 +528,29 @@ static int hw_breakpoint_handler(struct die_args *args) + if (likely(!(dr6 & (DR_TRAP0 << i)))) + continue; + ++ bp = this_cpu_read(bp_per_reg[i]); ++ if (!bp) ++ continue; ++ ++ bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE; ++ + /* +- * The counter may be concurrently released but that can only +- * occur from a call_rcu() path. We can then safely fetch +- * the breakpoint, use its callback, touch its counter +- * while we are in an rcu_read_lock() path. ++ * TF and data breakpoints are traps and can be merged, however ++ * instruction breakpoints are faults and will be raised ++ * separately. ++ * ++ * However DR6 can indicate both TF and instruction ++ * breakpoints. In that case take TF as that has precedence and ++ * delay the instruction breakpoint for the next exception. + */ +- rcu_read_lock(); ++ if (bpx && (dr6 & DR_STEP)) ++ continue; + +- bp = this_cpu_read(bp_per_reg[i]); + /* + * Reset the 'i'th TRAP bit in dr6 to denote completion of + * exception handling + */ + (*dr6_p) &= ~(DR_TRAP0 << i); +- /* +- * bp can be NULL due to lazy debug register switching +- * or due to concurrent perf counter removing. 
+- */ +- if (!bp) { +- rcu_read_unlock(); +- break; +- } + + perf_bp_event(bp, args->regs); + +@@ -538,11 +558,10 @@ static int hw_breakpoint_handler(struct die_args *args) + * Set up resume flag to avoid breakpoint recursion when + * returning back to origin. + */ +- if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE) ++ if (bpx) + args->regs->flags |= X86_EFLAGS_RF; +- +- rcu_read_unlock(); + } ++ + /* + * Further processing in do_debug() is needed for a) user-space + * breakpoints (to generate signals) and b) when the system has +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 83637a2ff6052..62157b1000f08 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -320,7 +320,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, + if (cpuid->nent < vcpu->arch.cpuid_nent) + goto out; + r = -EFAULT; +- if (copy_to_user(entries, &vcpu->arch.cpuid_entries, ++ if (copy_to_user(entries, vcpu->arch.cpuid_entries, + vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) + goto out; + return 0; +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 56cae1ff9e3fe..66a08322988f2 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2879,6 +2879,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) + ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); + *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : + (u32)msr_data; ++ if (efer & EFER_LMA) ++ ctxt->mode = X86EMUL_MODE_PROT64; + + return X86EMUL_CONTINUE; + } +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c +index b9265a585ea3c..c842d17240ccb 100644 +--- a/arch/x86/kvm/mmu/tdp_mmu.c ++++ b/arch/x86/kvm/mmu/tdp_mmu.c +@@ -1037,8 +1037,8 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) + } + + /* +- * Clear non-leaf entries (and free associated page tables) which could +- * be replaced by large mappings, for GFNs within the slot. ++ * Clear leaf entries which could be replaced by large mappings, for ++ * GFNs within the slot. + */ + static void zap_collapsible_spte_range(struct kvm *kvm, + struct kvm_mmu_page *root, +@@ -1050,7 +1050,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm, + + tdp_root_for_each_pte(iter, root, start, end) { + if (!is_shadow_present_pte(iter.old_spte) || +- is_last_spte(iter.old_spte, iter.level)) ++ !is_last_spte(iter.old_spte, iter.level)) + continue; + + pfn = spte_to_pfn(iter.old_spte); +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c +index 5c9630c3f6ba1..e3e04988fdabe 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -320,6 +320,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, + unsigned long first, last; + int ret; + ++ lockdep_assert_held(&kvm->lock); ++ + if (ulen == 0 || uaddr + ulen < uaddr) + return ERR_PTR(-EINVAL); + +@@ -1001,12 +1003,20 @@ int svm_register_enc_region(struct kvm *kvm, + if (!region) + return -ENOMEM; + ++ mutex_lock(&kvm->lock); + region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); + if (IS_ERR(region->pages)) { + ret = PTR_ERR(region->pages); ++ mutex_unlock(&kvm->lock); + goto e_free; + } + ++ region->uaddr = range->addr; ++ region->size = range->size; ++ ++ list_add_tail(®ion->list, &sev->regions_list); ++ mutex_unlock(&kvm->lock); ++ + /* + * The guest may change the memory encryption attribute from C=0 -> C=1 + * or vice versa for this memory range. 
Lets make sure caches are +@@ -1015,13 +1025,6 @@ int svm_register_enc_region(struct kvm *kvm, + */ + sev_clflush_pages(region->pages, region->npages); + +- region->uaddr = range->addr; +- region->size = range->size; +- +- mutex_lock(&kvm->lock); +- list_add_tail(®ion->list, &sev->regions_list); +- mutex_unlock(&kvm->lock); +- + return ret; + + e_free: +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 94b0cb8330451..f4ae3871e412a 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -438,6 +438,11 @@ static int has_svm(void) + return 0; + } + ++ if (sev_active()) { ++ pr_info("KVM is unsupported when running as an SEV guest\n"); ++ return 0; ++ } ++ + return 1; + } + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index c01aac2bac37c..82af43e14b09c 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -6874,11 +6874,20 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) + switch (index) { + case MSR_IA32_TSX_CTRL: + /* +- * No need to pass TSX_CTRL_CPUID_CLEAR through, so +- * let's avoid changing CPUID bits under the host +- * kernel's feet. ++ * TSX_CTRL_CPUID_CLEAR is handled in the CPUID ++ * interception. Keep the host value unchanged to avoid ++ * changing CPUID bits under the host kernel's feet. ++ * ++ * hle=0, rtm=0, tsx_ctrl=1 can be found with some ++ * combinations of new kernel and old userspace. If ++ * those guests run on a tsx=off host, do allow guests ++ * to use TSX_CTRL, but do not change the value on the ++ * host so that TSX remains always disabled. + */ +- vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; ++ if (boot_cpu_has(X86_FEATURE_RTM)) ++ vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; ++ else ++ vmx->guest_uret_msrs[j].mask = 0; + break; + default: + vmx->guest_uret_msrs[j].mask = -1ull; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 0a302685e4d62..18a315bbcb79e 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1376,16 +1376,24 @@ static u64 kvm_get_arch_capabilities(void) + if (!boot_cpu_has_bug(X86_BUG_MDS)) + data |= ARCH_CAP_MDS_NO; + +- /* +- * On TAA affected systems: +- * - nothing to do if TSX is disabled on the host. +- * - we emulate TSX_CTRL if present on the host. +- * This lets the guest use VERW to clear CPU buffers. +- */ +- if (!boot_cpu_has(X86_FEATURE_RTM)) +- data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR); +- else if (!boot_cpu_has_bug(X86_BUG_TAA)) ++ if (!boot_cpu_has(X86_FEATURE_RTM)) { ++ /* ++ * If RTM=0 because the kernel has disabled TSX, the host might ++ * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0 ++ * and therefore knows that there cannot be TAA) but keep ++ * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts, ++ * and we want to allow migrating those guests to tsx=off hosts. ++ */ ++ data &= ~ARCH_CAP_TAA_NO; ++ } else if (!boot_cpu_has_bug(X86_BUG_TAA)) { + data |= ARCH_CAP_TAA_NO; ++ } else { ++ /* ++ * Nothing to do here; we emulate TSX_CTRL if present on the ++ * host so the guest can choose between disabling TSX or ++ * using VERW to clear CPU buffers. 
++ */ ++ } + + return data; + } +@@ -9907,6 +9915,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) + fx_init(vcpu); + + vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); ++ vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); + + vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; + +diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c +index bc0833713be95..f80d10d39cf6d 100644 +--- a/arch/x86/mm/mem_encrypt.c ++++ b/arch/x86/mm/mem_encrypt.c +@@ -351,6 +351,7 @@ bool sev_active(void) + { + return sev_status & MSR_AMD64_SEV_ENABLED; + } ++EXPORT_SYMBOL_GPL(sev_active); + + /* Needs to be called from non-instrumentable code */ + bool noinstr sev_es_active(void) +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 4ad3c4b276dcf..7e17d4edccb12 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -602,7 +602,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, + ret = gdev->id; + goto err_free_gdev; + } +- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); ++ ++ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); ++ if (ret) ++ goto err_free_ida; ++ + device_initialize(&gdev->dev); + dev_set_drvdata(&gdev->dev, gdev); + if (gc->parent && gc->parent->driver) +@@ -616,7 +620,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, + gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); + if (!gdev->descs) { + ret = -ENOMEM; +- goto err_free_ida; ++ goto err_free_dev_name; + } + + if (gc->ngpio == 0) { +@@ -767,6 +771,8 @@ err_free_label: + kfree_const(gdev->label); + err_free_descs: + kfree(gdev->descs); ++err_free_dev_name: ++ kfree(dev_name(&gdev->dev)); + err_free_ida: + ida_free(&gpio_ida, gdev->id); + err_free_gdev: +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 0f7749e9424d4..580880212e551 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2278,8 +2278,6 @@ void amdgpu_dm_update_connector_after_detect( + + drm_connector_update_edid_property(connector, + aconnector->edid); +- drm_add_edid_modes(connector, aconnector->edid); +- + if (aconnector->dc_link->aux_mode) + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, + aconnector->edid); +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index e875425336406..7749b0ceabba9 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, + return 0; + } + +-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) ++/** ++ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link ++ * @link_rate: link rate in 10kbits/s units ++ * @link_lane_count: lane count ++ * ++ * Calculate the total bandwidth of a MultiStream Transport link. The returned ++ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to ++ * convert the number of PBNs required for a given stream to the number of ++ * timeslots this stream requires in each MTP. 
++ */ ++int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count) + { +- if (dp_link_bw == 0 || dp_link_count == 0) +- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", +- dp_link_bw, dp_link_count); ++ if (link_rate == 0 || link_lane_count == 0) ++ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n", ++ link_rate, link_lane_count); + +- return dp_link_bw * dp_link_count / 2; ++ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ ++ return link_rate * link_lane_count / 54000; + } ++EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); + + /** + * drm_dp_read_mst_cap() - check whether or not a sink supports MST +@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms + goto out_unlock; + } + +- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], ++ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), + mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); + if (mgr->pbn_div == 0) { + ret = -EINVAL; +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 3f2bbd9370a86..40dfb4d0ffbec 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -3274,6 +3274,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, + intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); + } + ++static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, ++ const struct intel_crtc_state *crtc_state) ++{ ++ struct drm_i915_private *i915 = to_i915(encoder->base.dev); ++ struct intel_digital_port *dig_port = enc_to_dig_port(encoder); ++ enum phy phy = intel_port_to_phy(i915, encoder->port); ++ ++ if (intel_phy_is_combo(i915, phy)) { ++ bool lane_reversal = ++ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; ++ ++ intel_combo_phy_power_up_lanes(i915, phy, false, ++ crtc_state->lane_count, ++ lane_reversal); ++ } ++} ++ + static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, +@@ -3367,14 +3384,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, + * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up + * the used lanes of the DDI. + */ +- if (intel_phy_is_combo(dev_priv, phy)) { +- bool lane_reversal = +- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; +- +- intel_combo_phy_power_up_lanes(dev_priv, phy, false, +- crtc_state->lane_count, +- lane_reversal); +- } ++ intel_ddi_power_up_lanes(encoder, crtc_state); + + /* + * 7.g Configure and enable DDI_BUF_CTL +@@ -3458,14 +3468,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, + else + intel_prepare_dp_ddi_buffers(encoder, crtc_state); + +- if (intel_phy_is_combo(dev_priv, phy)) { +- bool lane_reversal = +- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; +- +- intel_combo_phy_power_up_lanes(dev_priv, phy, false, +- crtc_state->lane_count, +- lane_reversal); +- } ++ intel_ddi_power_up_lanes(encoder, crtc_state); + + intel_ddi_init_dp_buf_reg(encoder); + if (!is_mst) +@@ -3933,6 +3936,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, + intel_de_write(dev_priv, reg, val); + } + ++ intel_ddi_power_up_lanes(encoder, crtc_state); ++ + /* In HDMI/DVI mode, the port width, and swing/emphasis values + * are ignored so nothing special needs to be done besides + * enabling the port. 
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index aabf09f89cada..45c2556d63955 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -2294,7 +2294,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, + */ + ret = i915_vma_pin_fence(vma); + if (ret != 0 && INTEL_GEN(dev_priv) < 4) { +- i915_gem_object_unpin_from_display_plane(vma); ++ i915_vma_unpin(vma); + vma = ERR_PTR(ret); + goto err; + } +@@ -2312,12 +2312,9 @@ err: + + void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) + { +- i915_gem_object_lock(vma->obj, NULL); + if (flags & PLANE_HAS_FENCE) + i915_vma_unpin_fence(vma); +- i915_gem_object_unpin_from_display_plane(vma); +- i915_gem_object_unlock(vma->obj); +- ++ i915_vma_unpin(vma); + i915_vma_put(vma); + } + +@@ -4883,6 +4880,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, + plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; + } else if (fb->format->is_yuv) { + plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; ++ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) ++ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; + } + + return plane_color_ctl; +diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c +index 5d745d9b99b2a..ecaa538b2d357 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c ++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c +@@ -68,7 +68,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, + + slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, + connector->port, +- crtc_state->pbn, 0); ++ crtc_state->pbn, ++ drm_dp_get_vc_payload_bw(crtc_state->port_clock, ++ crtc_state->lane_count)); + if (slots == -EDEADLK) + return slots; + if (slots >= 0) +diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c +index 52b4f6193b4ce..0095c8cac9b40 100644 +--- a/drivers/gpu/drm/i915/display/intel_overlay.c ++++ b/drivers/gpu/drm/i915/display/intel_overlay.c +@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay) + intel_frontbuffer_flip_complete(overlay->i915, + INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); + +- i915_gem_object_unpin_from_display_plane(vma); ++ i915_vma_unpin(vma); + i915_vma_put(vma); + } + +@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, + return 0; + + out_unpin: +- i915_gem_object_unpin_from_display_plane(vma); ++ i915_vma_unpin(vma); + out_pin_section: + atomic_dec(&dev_priv->gpu_error.pending_fb_pin); + +diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c +index 63040cb0d4e10..12f7128b777f6 100644 +--- a/drivers/gpu/drm/i915/display/intel_sprite.c ++++ b/drivers/gpu/drm/i915/display/intel_sprite.c +@@ -469,13 +469,19 @@ skl_program_scaler(struct intel_plane *plane, + + /* Preoffset values for YUV to RGB Conversion */ + #define PREOFF_YUV_TO_RGB_HI 0x1800 +-#define PREOFF_YUV_TO_RGB_ME 0x1F00 ++#define PREOFF_YUV_TO_RGB_ME 0x0000 + #define PREOFF_YUV_TO_RGB_LO 0x1800 + + #define ROFF(x) (((x) & 0xffff) << 16) + #define GOFF(x) (((x) & 0xffff) << 0) + #define BOFF(x) (((x) & 0xffff) << 16) + ++/* ++ * Programs the input color space conversion stage for ICL HDR planes. ++ * Note that it is assumed that this stage always happens after YUV ++ * range correction. 
Thus, the input to this stage is assumed to be ++ * in full-range YCbCr. ++ */ + static void + icl_program_input_csc(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, +@@ -523,52 +529,7 @@ icl_program_input_csc(struct intel_plane *plane, + 0x0, 0x7800, 0x7F10, + }, + }; +- +- /* Matrix for Limited Range to Full Range Conversion */ +- static const u16 input_csc_matrix_lr[][9] = { +- /* +- * BT.601 Limted range YCbCr -> full range RGB +- * The matrix required is : +- * [1.164384, 0.000, 1.596027, +- * 1.164384, -0.39175, -0.812813, +- * 1.164384, 2.017232, 0.0000] +- */ +- [DRM_COLOR_YCBCR_BT601] = { +- 0x7CC8, 0x7950, 0x0, +- 0x8D00, 0x7950, 0x9C88, +- 0x0, 0x7950, 0x6810, +- }, +- /* +- * BT.709 Limited range YCbCr -> full range RGB +- * The matrix required is : +- * [1.164384, 0.000, 1.792741, +- * 1.164384, -0.213249, -0.532909, +- * 1.164384, 2.112402, 0.0000] +- */ +- [DRM_COLOR_YCBCR_BT709] = { +- 0x7E58, 0x7950, 0x0, +- 0x8888, 0x7950, 0xADA8, +- 0x0, 0x7950, 0x6870, +- }, +- /* +- * BT.2020 Limited range YCbCr -> full range RGB +- * The matrix required is : +- * [1.164, 0.000, 1.678, +- * 1.164, -0.1873, -0.6504, +- * 1.164, 2.1417, 0.0000] +- */ +- [DRM_COLOR_YCBCR_BT2020] = { +- 0x7D70, 0x7950, 0x0, +- 0x8A68, 0x7950, 0xAC00, +- 0x0, 0x7950, 0x6890, +- }, +- }; +- const u16 *csc; +- +- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) +- csc = input_csc_matrix[plane_state->hw.color_encoding]; +- else +- csc = input_csc_matrix_lr[plane_state->hw.color_encoding]; ++ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; + + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), + ROFF(csc[0]) | GOFF(csc[1])); +@@ -585,14 +546,8 @@ icl_program_input_csc(struct intel_plane *plane, + + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), + PREOFF_YUV_TO_RGB_HI); +- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) +- intel_de_write_fw(dev_priv, +- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), +- 0); +- else +- intel_de_write_fw(dev_priv, +- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), +- PREOFF_YUV_TO_RGB_ME); ++ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), ++ PREOFF_YUV_TO_RGB_ME); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), + PREOFF_YUV_TO_RGB_LO); + intel_de_write_fw(dev_priv, +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c +index fcce6909f2017..3d435bfff7649 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c +@@ -387,48 +387,6 @@ err: + return vma; + } + +-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) +-{ +- struct drm_i915_private *i915 = to_i915(obj->base.dev); +- struct i915_vma *vma; +- +- if (list_empty(&obj->vma.list)) +- return; +- +- mutex_lock(&i915->ggtt.vm.mutex); +- spin_lock(&obj->vma.lock); +- for_each_ggtt_vma(vma, obj) { +- if (!drm_mm_node_allocated(&vma->node)) +- continue; +- +- GEM_BUG_ON(vma->vm != &i915->ggtt.vm); +- list_move_tail(&vma->vm_link, &vma->vm->bound_list); +- } +- spin_unlock(&obj->vma.lock); +- mutex_unlock(&i915->ggtt.vm.mutex); +- +- if (i915_gem_object_is_shrinkable(obj)) { +- unsigned long flags; +- +- spin_lock_irqsave(&i915->mm.obj_lock, flags); +- +- if (obj->mm.madv == I915_MADV_WILLNEED && +- !atomic_read(&obj->mm.shrink_pin)) +- list_move_tail(&obj->mm.link, &i915->mm.shrink_list); +- +- spin_unlock_irqrestore(&i915->mm.obj_lock, flags); +- } +-} +- 
+-void +-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) +-{ +- /* Bump the LRU to try and avoid premature eviction whilst flipping */ +- i915_gem_object_bump_inactive_ggtt(vma->obj); +- +- i915_vma_unpin(vma); +-} +- + /** + * Moves a single object to the CPU read, and possibly write domain. + * @obj: object to act on +@@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + else + err = i915_gem_object_set_to_cpu_domain(obj, write_domain); + +- /* And bump the LRU for this access */ +- i915_gem_object_bump_inactive_ggtt(obj); +- + i915_gem_object_unlock(obj); + + if (write_domain) +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h +index d46db8d8f38e4..bc48717971204 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h +@@ -471,7 +471,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, + const struct i915_ggtt_view *view, + unsigned int flags); +-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); + + void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); + void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +index 0625cbb3b4312..0040b4765a54d 100644 +--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c ++++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +@@ -451,10 +451,12 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq) + struct intel_context *ce = rq->context; + bool release; + +- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) ++ spin_lock(&ce->signal_lock); ++ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { ++ spin_unlock(&ce->signal_lock); + return; ++ } + +- spin_lock(&ce->signal_lock); + list_del_rcu(&rq->signal_link); + release = remove_signaling_context(rq->engine->breadcrumbs, ce); + spin_unlock(&ce->signal_lock); +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 8c73377ac82ca..3d004ca76b6ed 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -215,9 +215,17 @@ static const struct xpad_device { + { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, +- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 
0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, +@@ -296,6 +304,9 @@ static const struct xpad_device { + { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, ++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, +@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ ++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ ++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + { } + }; + +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 3a2dcf0805f12..c74b020796a94 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + }, ++ }, ++ { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c +index 6612f9e2d7e83..45113767db964 100644 +--- a/drivers/input/touchscreen/goodix.c ++++ b/drivers/input/touchscreen/goodix.c +@@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = { + { .id = "5663", .data = >1x_chip_data }, + { .id = "5688", .data = >1x_chip_data }, + { .id = "917S", .data = >1x_chip_data }, ++ { .id = "9286", .data = >1x_chip_data }, + + { .id = "911", .data = >911_chip_data }, + { .id = "9271", .data = >911_chip_data }, +@@ -1445,6 +1446,7 @@ static const struct of_device_id goodix_of_match[] = { + { .compatible = "goodix,gt927" }, + { .compatible = "goodix,gt9271" }, + { .compatible = "goodix,gt928" }, ++ { .compatible = "goodix,gt9286" }, + { .compatible = "goodix,gt967" }, + { } + }; +diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c +index 199cf3daec106..d8fccf048bf44 100644 +--- a/drivers/input/touchscreen/ili210x.c ++++ b/drivers/input/touchscreen/ili210x.c +@@ -29,11 +29,13 @@ struct ili2xxx_chip { + void *buf, size_t len); + int (*get_touch_data)(struct i2c_client *client, u8 *data); + bool (*parse_touch_data)(const u8 *data, unsigned int finger, +- unsigned int *x, unsigned int *y); ++ unsigned int *x, unsigned int *y, ++ unsigned int *z); + bool (*continue_polling)(const u8 *data, bool touch); + unsigned int max_touches; + unsigned int resolution; + bool has_calibrate_reg; ++ bool has_pressure_reg; + }; + + struct ili210x { +@@ -82,7 +84,8 @@ static int 
ili210x_read_touch_data(struct i2c_client *client, u8 *data) + + static bool ili210x_touchdata_to_coords(const u8 *touchdata, + unsigned int finger, +- unsigned int *x, unsigned int *y) ++ unsigned int *x, unsigned int *y, ++ unsigned int *z) + { + if (touchdata[0] & BIT(finger)) + return false; +@@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data) + + static bool ili211x_touchdata_to_coords(const u8 *touchdata, + unsigned int finger, +- unsigned int *x, unsigned int *y) ++ unsigned int *x, unsigned int *y, ++ unsigned int *z) + { + u32 data; + +@@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = { + + static bool ili212x_touchdata_to_coords(const u8 *touchdata, + unsigned int finger, +- unsigned int *x, unsigned int *y) ++ unsigned int *x, unsigned int *y, ++ unsigned int *z) + { + u16 val; + +@@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data) + + static bool ili251x_touchdata_to_coords(const u8 *touchdata, + unsigned int finger, +- unsigned int *x, unsigned int *y) ++ unsigned int *x, unsigned int *y, ++ unsigned int *z) + { + u16 val; + +@@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata, + + *x = val & 0x3fff; + *y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2); ++ *z = touchdata[1 + (finger * 5) + 4]; + + return true; + } +@@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = { + .continue_polling = ili251x_check_continue_polling, + .max_touches = 10, + .has_calibrate_reg = true, ++ .has_pressure_reg = true, + }; + + static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata) +@@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata) + struct input_dev *input = priv->input; + int i; + bool contact = false, touch; +- unsigned int x = 0, y = 0; ++ unsigned int x = 0, y = 0, z = 0; + + for (i = 0; i < priv->chip->max_touches; i++) { +- touch = priv->chip->parse_touch_data(touchdata, i, &x, &y); ++ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z); + + input_mt_slot(input, i); + if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) { + touchscreen_report_pos(input, &priv->prop, x, y, true); ++ if (priv->chip->has_pressure_reg) ++ input_report_abs(input, ABS_MT_PRESSURE, z); + contact = true; + } + } +@@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client, + max_xy = (chip->resolution ?: SZ_64K) - 1; + input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0); ++ if (priv->chip->has_pressure_reg) ++ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0); + touchscreen_parse_properties(input, true, &priv->prop); + + error = input_mt_init_slots(input, priv->chip->max_touches, +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 3be74cf3635fe..7a0a228d64bbe 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws) + * could wait for this and below md_handle_request could wait for those + * bios because of suspend check + */ ++ spin_lock_irq(&mddev->lock); + mddev->last_flush = mddev->start_flush; + mddev->flush_bio = NULL; ++ spin_unlock_irq(&mddev->lock); + wake_up(&mddev->sb_wait); + + if (bio->bi_iter.bi_size == 0) { +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c +index 44bea5e4aeda1..b23773583179d 100644 +--- a/drivers/mmc/core/sdio_cis.c ++++ b/drivers/mmc/core/sdio_cis.c +@@ -20,6 
+20,8 @@ + #include "sdio_cis.h" + #include "sdio_ops.h" + ++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ ++ + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, + const unsigned char *buf, unsigned size) + { +@@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + + do { + unsigned char tpl_code, tpl_link; ++ unsigned long timeout = jiffies + ++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); + + ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); + if (ret) +@@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + prev = &this->next; + + if (ret == -ENOENT) { ++ if (time_after(jiffies, timeout)) ++ break; + /* warn about unknown tuples */ + pr_warn_ratelimited("%s: queuing unknown" + " CIS tuple 0x%02x (%u bytes)\n", +diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h +index 6301b81cf5731..9bd717ff784be 100644 +--- a/drivers/mmc/host/sdhci-pltfm.h ++++ b/drivers/mmc/host/sdhci-pltfm.h +@@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) + return host->private; + } + ++extern const struct dev_pm_ops sdhci_pltfm_pmops; ++#ifdef CONFIG_PM_SLEEP + int sdhci_pltfm_suspend(struct device *dev); + int sdhci_pltfm_resume(struct device *dev); +-extern const struct dev_pm_ops sdhci_pltfm_pmops; ++#else ++static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; } ++static inline int sdhci_pltfm_resume(struct device *dev) { return 0; } ++#endif + + #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c +index 34cca0a4b31c7..87160e723dfcf 100644 +--- a/drivers/net/dsa/mv88e6xxx/chip.c ++++ b/drivers/net/dsa/mv88e6xxx/chip.c +@@ -1669,7 +1669,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, + if (!entry.portvec) + entry.state = 0; + } else { +- entry.portvec |= BIT(port); ++ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) ++ entry.portvec = BIT(port); ++ else ++ entry.portvec |= BIT(port); ++ + entry.state = state; + } + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 627ce1a20473a..2f281d0f98070 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -5339,11 +5339,6 @@ static int ibmvnic_remove(struct vio_dev *dev) + unsigned long flags; + + spin_lock_irqsave(&adapter->state_lock, flags); +- if (test_bit(0, &adapter->resetting)) { +- spin_unlock_irqrestore(&adapter->state_lock, flags); +- return -EBUSY; +- } +- + adapter->state = VNIC_REMOVING; + spin_unlock_irqrestore(&adapter->state_lock, flags); + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index 2872c4dc77f07..3b269c70dcfe1 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; +- +- /* Always report link is down if the VF queues aren't enabled */ +- if (!vf->queues_enabled) { +- pfe.event_data.link_event.link_status = false; +- pfe.event_data.link_event.link_speed = 0; +- } else if (vf->link_forced) { ++ if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
VIRTCHNL_LINK_SPEED_40GB : 0); +@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) + pfe.event_data.link_event.link_speed = + i40e_virtchnl_link_speed(ls->link_speed); + } +- + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); + } +@@ -2443,8 +2437,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) + } + } + +- vf->queues_enabled = true; +- + error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, +@@ -2466,9 +2458,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + +- /* Immediately mark queues as disabled */ +- vf->queues_enabled = false; +- + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +index 5491215d81deb..091e32c1bb46f 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +@@ -98,7 +98,6 @@ struct i40e_vf { + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + bool link_forced; + bool link_up; /* only valid if VF link is forced */ +- bool queues_enabled; /* true if the VF queues are enabled */ + bool spoofchk; + u16 num_vlan; + +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c +index 831f2f09de5fb..ec8cd69d49928 100644 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c +@@ -1714,7 +1714,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + Asym_Pause); + } + +- status = rd32(IGC_STATUS); ++ status = pm_runtime_suspended(&adapter->pdev->dev) ? ++ 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c +index 8b67d9b49a83a..7ec04e48860c6 100644 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c +@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) + { + struct igc_nvm_info *nvm = &hw->nvm; ++ s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; +- s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); +- ret_val = -IGC_ERR_NVM; + goto out; + } + +diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c +index 09cd0ec7ee87d..67b8ffd21d8af 100644 +--- a/drivers/net/ethernet/intel/igc/igc_mac.c ++++ b/drivers/net/ethernet/intel/igc/igc_mac.c +@@ -638,7 +638,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) + } + + out: +- return 0; ++ return ret_val; + } + + /** +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +index a30eb90ba3d28..dd590086fe6a5 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) + /* Clear entry invalidation bit */ + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + +- /* Write tcam index - indirect access */ +- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); +- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) +- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); +- + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); + ++ /* Write tcam index - indirect access */ ++ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); ++ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) ++ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index c9b5d7f29911e..42848db8f8dd6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -3593,12 +3593,10 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, + + err = mlx5e_safe_switch_channels(priv, &new_channels, + mlx5e_num_channels_changed_ctx, NULL); +- if (err) +- goto out; + +- priv->max_opened_tc = max_t(u8, priv->max_opened_tc, +- new_channels.params.num_tc); + out: ++ priv->max_opened_tc = max_t(u8, priv->max_opened_tc, ++ priv->channels.params.num_tc); + mutex_unlock(&priv->state_lock); + return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 6628a0197b4e0..6d2ba8b84187c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1262,8 +1262,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + + if (mlx5e_cqe_regb_chain(cqe)) +- if (!mlx5e_tc_update_skb(cqe, skb)) ++ if (!mlx5e_tc_update_skb(cqe, skb)) { ++ dev_kfree_skb_any(skb); + goto free_wqe; ++ } + + napi_gro_receive(rq->cq.napi, skb); + +@@ -1316,8 +1318,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + if (rep->vlan && skb_vlan_tag_present(skb)) + skb_vlan_pop(skb); + +- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) ++ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) { ++ dev_kfree_skb_any(skb); + goto free_wqe; ++ } + + napi_gro_receive(rq->cq.napi, skb); + +@@ -1371,8 +1375,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq 
*rq, struct mlx5_cqe64 + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + +- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) ++ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) { ++ dev_kfree_skb_any(skb); + goto mpwrq_cqe_out; ++ } + + napi_gro_receive(rq->cq.napi, skb); + +@@ -1528,8 +1534,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + + if (mlx5e_cqe_regb_chain(cqe)) +- if (!mlx5e_tc_update_skb(cqe, skb)) ++ if (!mlx5e_tc_update_skb(cqe, skb)) { ++ dev_kfree_skb_any(skb); + goto mpwrq_cqe_out; ++ } + + napi_gro_receive(rq->cq.napi, skb); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 634c2bfd25be1..79fc5755735fa 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1764,6 +1764,7 @@ search_again_locked: + if (!fte_tmp) + continue; + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); ++ /* No error check needed here, because insert_fte() is not called */ + up_write_ref_node(&fte_tmp->node, false); + tree_put_node(&fte_tmp->node, false); + kmem_cache_free(steering->ftes_cache, fte); +@@ -1816,6 +1817,8 @@ skip_search: + up_write_ref_node(&g->node, false); + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); + up_write_ref_node(&fte->node, false); ++ if (IS_ERR(rule)) ++ tree_put_node(&fte->node, false); + return rule; + } + rule = ERR_PTR(-ENOENT); +@@ -1914,6 +1917,8 @@ search_again_locked: + up_write_ref_node(&g->node, false); + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); + up_write_ref_node(&fte->node, false); ++ if (IS_ERR(rule)) ++ tree_put_node(&fte->node, false); + tree_put_node(&g->node, false); + return rule; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +index a3e0c71831928..a44a2bad5bbb5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +@@ -76,7 +76,7 @@ enum { + + static u32 get_function(u16 func_id, bool ec_function) + { +- return func_id & (ec_function << 16); ++ return (u32)func_id | (ec_function << 16); + } + + static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function) +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c +index 762cabf16157b..75f774347f6d1 100644 +--- a/drivers/net/ethernet/realtek/r8169_main.c ++++ b/drivers/net/ethernet/realtek/r8169_main.c +@@ -4082,17 +4082,72 @@ err_out: + return -EIO; + } + +-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp) ++static bool rtl_skb_is_udp(struct sk_buff *skb) ++{ ++ int no = skb_network_offset(skb); ++ struct ipv6hdr *i6h, _i6h; ++ struct iphdr *ih, _ih; ++ ++ switch (vlan_get_protocol(skb)) { ++ case htons(ETH_P_IP): ++ ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); ++ return ih && ih->protocol == IPPROTO_UDP; ++ case htons(ETH_P_IPV6): ++ i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); ++ return i6h && i6h->nexthdr == IPPROTO_UDP; ++ default: ++ return false; ++ } ++} ++ ++#define RTL_MIN_PATCH_LEN 47 ++ ++/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ ++static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, ++ struct sk_buff *skb) + { ++ unsigned int padto = 0, len = skb->len; ++ ++ if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && ++ rtl_skb_is_udp(skb) 
&& skb_transport_header_was_set(skb)) { ++ unsigned int trans_data_len = skb_tail_pointer(skb) - ++ skb_transport_header(skb); ++ ++ if (trans_data_len >= offsetof(struct udphdr, len) && ++ trans_data_len < RTL_MIN_PATCH_LEN) { ++ u16 dest = ntohs(udp_hdr(skb)->dest); ++ ++ /* dest is a standard PTP port */ ++ if (dest == 319 || dest == 320) ++ padto = len + RTL_MIN_PATCH_LEN - trans_data_len; ++ } ++ ++ if (trans_data_len < sizeof(struct udphdr)) ++ padto = max_t(unsigned int, padto, ++ len + sizeof(struct udphdr) - trans_data_len); ++ } ++ ++ return padto; ++} ++ ++static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, ++ struct sk_buff *skb) ++{ ++ unsigned int padto; ++ ++ padto = rtl8125_quirk_udp_padto(tp, skb); ++ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: +- return true; ++ padto = max_t(unsigned int, padto, ETH_ZLEN); + default: +- return false; ++ break; + } ++ ++ return padto; + } + + static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +@@ -4164,9 +4219,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { +- if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp))) +- /* eth_skb_pad would free the skb on error */ +- return !__skb_put_padto(skb, ETH_ZLEN, false); ++ unsigned int padto = rtl_quirk_packet_padto(tp, skb); ++ ++ /* skb_padto would free the skb on error */ ++ return !__skb_put_padto(skb, padto, false); + } + + return true; +@@ -4349,6 +4405,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + ++ if (rtl_quirk_packet_padto(tp, skb)) ++ features &= ~NETIF_F_CSUM_MASK; ++ + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; +@@ -4694,10 +4753,10 @@ static int rtl8169_close(struct net_device *dev) + + cancel_work_sync(&tp->wk.work); + +- phy_disconnect(tp->phydev); +- + free_irq(pci_irq_vector(pdev, 0), tp); + ++ phy_disconnect(tp->phydev); ++ + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, +diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c +index 6bfac1efe037c..4a68da7115d19 100644 +--- a/drivers/net/ipa/gsi.c ++++ b/drivers/net/ipa/gsi.c +@@ -1256,7 +1256,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) + /* Hardware requires a 2^n ring size, with alignment equal to size */ + ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); + if (ring->virt && addr % size) { +- dma_free_coherent(dev, size, ring->virt, ring->addr); ++ dma_free_coherent(dev, size, ring->virt, addr); + dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", + size); + return -EINVAL; /* Not a good error value, but distinct */ +diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c +index b59032e0859b7..9d208570d059a 100644 +--- a/drivers/nvdimm/dimm_devs.c ++++ b/drivers/nvdimm/dimm_devs.c +@@ -335,16 +335,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, + } + static DEVICE_ATTR_RO(state); + +-static ssize_t available_slots_show(struct device *dev, +- struct device_attribute *attr, char *buf) ++static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) + { +- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); ++ struct device *dev; + ssize_t 
rc; + u32 nfree; + + if (!ndd) + return -ENXIO; + ++ dev = ndd->dev; + nvdimm_bus_lock(dev); + nfree = nd_label_nfree(ndd); + if (nfree - 1 > nfree) { +@@ -356,6 +356,18 @@ static ssize_t available_slots_show(struct device *dev, + nvdimm_bus_unlock(dev); + return rc; + } ++ ++static ssize_t available_slots_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ ssize_t rc; ++ ++ nd_device_lock(dev); ++ rc = __available_slots_show(dev_get_drvdata(dev), buf); ++ nd_device_unlock(dev); ++ ++ return rc; ++} + static DEVICE_ATTR_RO(available_slots); + + __weak ssize_t security_show(struct device *dev, +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c +index 6da67f4d641a2..2403b71b601e9 100644 +--- a/drivers/nvdimm/namespace_devs.c ++++ b/drivers/nvdimm/namespace_devs.c +@@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct kobject *kobj, + return a->mode; + } + +- if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr +- || a == &dev_attr_holder.attr +- || a == &dev_attr_holder_class.attr +- || a == &dev_attr_force_raw.attr +- || a == &dev_attr_mode.attr) ++ /* base is_namespace_io() attributes */ ++ if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr || ++ a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr || ++ a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr || ++ a == &dev_attr_resource.attr) + return a->mode; + + return 0; +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index a3486c1c27f0c..a32494cde61f7 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3262,6 +3262,8 @@ static const struct pci_device_id nvme_id_table[] = { + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, ++ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ ++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), + .driver_data = NVME_QUIRK_SINGLE_VECTOR }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index dc1f0f6471896..aacf06f0b4312 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) + length = cmd->pdu_len; + cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); + offset = cmd->rbytes_done; +- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); ++ cmd->sg_idx = offset / PAGE_SIZE; + sg_offset = offset % PAGE_SIZE; + sg = &cmd->req.sg[cmd->sg_idx]; + +@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) + length -= iov_len; + sg = sg_next(sg); + iov++; ++ sg_offset = 0; + } + + iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, +diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c +index a5f988a9f9482..b5442f979b4d0 100644 +--- a/drivers/thunderbolt/acpi.c ++++ b/drivers/thunderbolt/acpi.c +@@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, + * managed with the xHCI and the SuperSpeed hub so we create the + * link from xHCI instead. 
+ */ +- while (!dev_is_pci(dev)) ++ while (dev && !dev_is_pci(dev)) + dev = dev->parent; + + if (!dev) +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 134dc2005ce97..c9f6e97582885 100644 +--- a/drivers/usb/class/usblp.c ++++ b/drivers/usb/class/usblp.c +@@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) + if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) + return -EINVAL; + +- alts = usblp->protocol[protocol].alt_setting; +- if (alts < 0) +- return -EINVAL; +- r = usb_set_interface(usblp->dev, usblp->ifnum, alts); +- if (r < 0) { +- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", +- alts, usblp->ifnum); +- return r; ++ /* Don't unnecessarily set the interface if there's a single alt. */ ++ if (usblp->intf->num_altsetting > 1) { ++ alts = usblp->protocol[protocol].alt_setting; ++ if (alts < 0) ++ return -EINVAL; ++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts); ++ if (r < 0) { ++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", ++ alts, usblp->ifnum); ++ return r; ++ } + } + + usblp->bidir = (usblp->protocol[protocol].epread != NULL); +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index 0a0d11151cfb8..ad4c94366dadf 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, + static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + u32 windex) + { +- struct dwc2_hsotg_ep *ep; + int dir = (windex & USB_DIR_IN) ? 1 : 0; + int idx = windex & 0x7F; + +@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + if (idx > hsotg->num_of_eps) + return NULL; + +- ep = index_to_ep(hsotg, idx, dir); +- +- if (idx && ep->dir_in != dir) +- return NULL; +- +- return ep; ++ return index_to_ep(hsotg, idx, dir); + } + + /** +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 841daec70b6ef..3101f0dcf6ae8 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1758,7 +1758,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) + if (PMSG_IS_AUTO(msg)) + break; + +- ret = dwc3_core_init(dwc); ++ ret = dwc3_core_init_for_resume(dwc); + if (ret) + return ret; + +diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c +index 30313b233680d..99c7fc0d1d597 100644 +--- a/drivers/usb/gadget/legacy/ether.c ++++ b/drivers/usb/gadget/legacy/ether.c +@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail1; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c +index 6497185ec4e7a..bfd8e77788e29 100644 +--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c ++++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c +@@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub, + str_array[offset].s = NULL; + + ret = ast_vhub_str_alloc_add(vhub, &lang_str); +- if (ret) ++ if (ret) { ++ of_node_put(child); + break; ++ } + } + + return ret; +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c +index 45c54d56ecbd5..b45e5bf089979 100644 +--- a/drivers/usb/host/xhci-mtk-sch.c ++++ 
b/drivers/usb/host/xhci-mtk-sch.c
+@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
+ 
+ sch_ep->sch_tt = tt;
+ sch_ep->ep = ep;
++ INIT_LIST_HEAD(&sch_ep->endpoint);
++ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
+ 
+ return sch_ep;
+ }
+@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
+ sch_ep->bw_budget_table[j];
+ }
+ }
++ sch_ep->allocated = used;
+ }
+ 
+ static int check_sch_tt(struct usb_device *udev,
+@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev,
+ return 0;
+ }
+ 
++static void destroy_sch_ep(struct usb_device *udev,
++ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
++{
++ /* only release the bandwidth if the ep passed check_sch_bw() */
++ if (sch_ep->allocated)
++ update_bus_bw(sch_bw, sch_ep, 0);
++
++ list_del(&sch_ep->endpoint);
++
++ if (sch_ep->sch_tt) {
++ list_del(&sch_ep->tt_endpoint);
++ drop_tt(udev);
++ }
++ kfree(sch_ep);
++}
++
+ static bool need_bw_sch(struct usb_host_endpoint *ep,
+ enum usb_device_speed speed, int has_tt)
+ {
+@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
+ 
+ mtk->sch_array = sch_array;
+ 
++ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
+@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_virt_device *virt_dev;
+- struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep;
+- struct mu3h_sch_bw_info *sch_array;
+ unsigned int ep_index;
+- int bw_index;
+- int ret = 0;
+ 
+ xhci = hcd_to_xhci(hcd);
+ virt_dev = xhci->devs[udev->slot_id];
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+- sch_array = mtk->sch_array;
+ 
+ xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
+ __func__, usb_endpoint_type(&ep->desc), udev->speed,
+@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ return 0;
+ }
+ 
+- bw_index = get_bw_index(xhci, udev, ep);
+- sch_bw = &sch_array[bw_index];
+-
+ sch_ep = create_sch_ep(udev, ep, ep_ctx);
+ if (IS_ERR_OR_NULL(sch_ep))
+ return -ENOMEM;
+ 
+ setup_sch_info(udev, ep_ctx, sch_ep);
+ 
+- ret = check_sch_bw(udev, sch_bw, sch_ep);
+- if (ret) {
+- xhci_err(xhci, "Not enough bandwidth!\n");
+- if (is_fs_or_ls(udev->speed))
+- drop_tt(udev);
+-
+- kfree(sch_ep);
+- return -ENOSPC;
+- }
+-
+- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+-
+- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
+- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+- | EP_BREPEAT(sch_ep->repeat));
+-
+- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+- sch_ep->offset, sch_ep->repeat);
++ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
+ 
+ return 0;
+ }
+@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct xhci_virt_device *virt_dev;
+ struct mu3h_sch_bw_info *sch_array;
+ struct mu3h_sch_bw_info *sch_bw;
+- struct mu3h_sch_ep_info *sch_ep;
++ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index;
+ 
+ xhci = hcd_to_xhci(hcd);
+@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ bw_index = get_bw_index(xhci, udev, 
ep); + sch_bw = &sch_array[bw_index]; + +- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { ++ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) { + if (sch_ep->ep == ep) { +- update_bus_bw(sch_bw, sch_ep, 0); +- list_del(&sch_ep->endpoint); +- if (is_fs_or_ls(udev->speed)) { +- list_del(&sch_ep->tt_endpoint); +- drop_tt(udev); +- } +- kfree(sch_ep); ++ destroy_sch_ep(udev, sch_bw, sch_ep); + break; + } + } + } + EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); ++ ++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; ++ struct mu3h_sch_bw_info *sch_bw; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; ++ int bw_index, ret; ++ ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); ++ ++ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) { ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ ++ ret = check_sch_bw(udev, sch_bw, sch_ep); ++ if (ret) { ++ xhci_err(xhci, "Not enough bandwidth!\n"); ++ return -ENOSPC; ++ } ++ } ++ ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { ++ struct xhci_ep_ctx *ep_ctx; ++ struct usb_host_endpoint *ep = sch_ep->ep; ++ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc); ++ ++ bw_index = get_bw_index(xhci, udev, ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ ++ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); ++ ++ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); ++ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) ++ | EP_BCSCOUNT(sch_ep->cs_count) ++ | EP_BBM(sch_ep->burst_mode)); ++ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) ++ | EP_BREPEAT(sch_ep->repeat)); ++ ++ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", ++ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, ++ sch_ep->offset, sch_ep->repeat); ++ } ++ ++ return xhci_check_bandwidth(hcd, udev); ++} ++EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth); ++ ++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ struct mu3h_sch_bw_info *sch_bw; ++ struct mu3h_sch_ep_info *sch_ep, *tmp; ++ int bw_index; ++ ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); ++ ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep); ++ sch_bw = &mtk->sch_array[bw_index]; ++ destroy_sch_ep(udev, sch_bw, sch_ep); ++ } ++ ++ xhci_reset_bandwidth(hcd, udev); ++} ++EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth); +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c +index 8f321f39ab960..fe010cc61f19b 100644 +--- a/drivers/usb/host/xhci-mtk.c ++++ b/drivers/usb/host/xhci-mtk.c +@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable) + static int xhci_mtk_setup(struct usb_hcd *hcd); + static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { + .reset = xhci_mtk_setup, ++ .check_bandwidth = xhci_mtk_check_bandwidth, ++ .reset_bandwidth = xhci_mtk_reset_bandwidth, + }; + + static struct hc_driver __read_mostly xhci_mtk_hc_driver; +diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h +index a93cfe8179049..cbb09dfea62e0 100644 +--- a/drivers/usb/host/xhci-mtk.h ++++ 
b/drivers/usb/host/xhci-mtk.h
+@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info {
+ * @ep_type: endpoint type
+ * @maxpkt: max packet size of endpoint
+ * @ep: address of usb_host_endpoint struct
++ * @allocated: the bandwidth is already allocated from bus_bw
+ * @offset: which uframe of the interval that transfer should be
+ * scheduled first time within the interval
+ * @repeat: the time gap between two uframes that transfers are
+@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info {
+ u32 ep_type;
+ u32 maxpkt;
+ void *ep;
++ bool allocated;
+ /*
+ * mtk xHCI scheduling information put into reserved DWs
+ * in ep context
+@@ -131,6 +133,7 @@ struct xhci_hcd_mtk {
+ struct device *dev;
+ struct usb_hcd *hcd;
+ struct mu3h_sch_bw_info *sch_array;
++ struct list_head bw_ep_chk_list;
+ struct mu3c_ippc_regs __iomem *ippc_regs;
+ bool has_ippc;
+ int num_u2_ports;
+@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep);
+ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep);
++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+ 
+ #else
+ static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
+@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
+ {
+ }
+ 
++static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
++ struct usb_device *udev)
++{
++ return 0;
++}
++
++static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
++ struct usb_device *udev)
++{
++}
+ #endif
+ 
+ #endif /* _XHCI_MTK_H_ */
+diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
+index 60651a50770f9..8ca1a235d1645 100644
+--- a/drivers/usb/host/xhci-mvebu.c
++++ b/drivers/usb/host/xhci-mvebu.c
+@@ -8,6 +8,7 @@
+ #include 
+ #include 
+ #include 
++#include 
+ 
+ #include 
+ #include 
+@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+ return 0;
+ }
+ 
++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
++{
++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ struct device *dev = hcd->self.controller;
++ struct phy *phy;
++ int ret;
++
++ /* Old bindings miss the PHY handle */
++ phy = of_phy_get(dev->of_node, "usb3-phy");
++ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++ else if (IS_ERR(phy))
++ goto phy_out;
++
++ ret = phy_init(phy);
++ if (ret)
++ goto phy_put;
++
++ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
++ if (ret)
++ goto phy_exit;
++
++ ret = phy_power_on(phy);
++ if (ret == -EOPNOTSUPP) {
++ /* Skip initialization of XHCI PHY when it is unsupported by firmware */
++ dev_warn(dev, "PHY unsupported by firmware\n");
++ xhci->quirks |= XHCI_SKIP_PHY_INIT;
++ }
++ if (ret)
++ goto phy_exit;
++
++ phy_power_off(phy);
++phy_exit:
++ phy_exit(phy);
++phy_put:
++ of_phy_put(phy);
++phy_out:
++
++ return 0;
++}
++
+ int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
+index 3be021793cc8b..01bf3fcb3eca5 100644
+--- a/drivers/usb/host/xhci-mvebu.h
++++ b/drivers/usb/host/xhci-mvebu.h
+@@ -12,6 +12,7 @@ struct usb_hcd;
+ 
+ #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
+ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
+ int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
+ #else
+ static inline int 
xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) +@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) + return 0; + } + ++static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) ++{ ++ return 0; ++} ++ + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) + { + return 0; +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 4d34f6005381e..c1edcc9b13cec 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd) + priv->plat_start(hcd); + } + ++static int xhci_priv_plat_setup(struct usb_hcd *hcd) ++{ ++ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); ++ ++ if (!priv->plat_setup) ++ return 0; ++ ++ return priv->plat_setup(hcd); ++} ++ + static int xhci_priv_init_quirk(struct usb_hcd *hcd) + { + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); +@@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { + }; + + static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { ++ .plat_setup = xhci_mvebu_a3700_plat_setup, + .init_quirk = xhci_mvebu_a3700_init_quirk, + }; + +@@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev) + + hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); + xhci->shared_hcd->tpl_support = hcd->tpl_support; +- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) ++ ++ if (priv) { ++ ret = xhci_priv_plat_setup(hcd); ++ if (ret) ++ goto disable_usb_phy; ++ } ++ ++ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) + hcd->skip_phy_initialization = 1; + + if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) +diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h +index 1fb149d1fbcea..561d0b7bce098 100644 +--- a/drivers/usb/host/xhci-plat.h ++++ b/drivers/usb/host/xhci-plat.h +@@ -13,6 +13,7 @@ + struct xhci_plat_priv { + const char *firmware_name; + unsigned long long quirks; ++ int (*plat_setup)(struct usb_hcd *); + void (*plat_start)(struct usb_hcd *); + int (*init_quirk)(struct usb_hcd *); + int (*suspend_quirk)(struct usb_hcd *); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index db8612ec82d3e..061d5c51405fb 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -699,11 +699,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ +- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, +- seg->bounce_len, seg->bounce_offs); +- if (len != seg->bounce_len) +- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", +- len, seg->bounce_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, ++ seg->bounce_len, seg->bounce_offs); ++ if (len != seg->bounce_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", ++ len, seg->bounce_len); ++ } else { ++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, ++ seg->bounce_len); ++ } + seg->bounce_len = 0; + seg->bounce_offs = 0; + } +@@ -3275,12 +3280,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, + + /* create a max max_pkt sized bounce buffer pointed to by last trb */ + if (usb_urb_dir_out(urb)) { +- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, +- seg->bounce_buf, 
new_buff_len, enqd_len); +- if (len != new_buff_len) +- xhci_warn(xhci, +- "WARN Wrong bounce buffer write length: %zu != %d\n", +- len, new_buff_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, ++ seg->bounce_buf, new_buff_len, enqd_len); ++ if (len != new_buff_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", ++ len, new_buff_len); ++ } else { ++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); ++ } ++ + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, + max_pkt, DMA_TO_DEVICE); + } else { +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 73f1373d517a2..d17bbb162810a 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -2861,7 +2861,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, + * else should be touching the xhci->devs[slot_id] structure, so we + * don't need to take the xhci->lock for manipulating that. + */ +-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + { + int i; + int ret = 0; +@@ -2959,7 +2959,7 @@ command_cleanup: + return ret; + } + +-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + { + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; +@@ -5385,6 +5385,10 @@ void xhci_init_driver(struct hc_driver *drv, + drv->reset = over->reset; + if (over->start) + drv->start = over->start; ++ if (over->check_bandwidth) ++ drv->check_bandwidth = over->check_bandwidth; ++ if (over->reset_bandwidth) ++ drv->reset_bandwidth = over->reset_bandwidth; + } + } + EXPORT_SYMBOL_GPL(xhci_init_driver); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index d90c0d5df3b37..045740ad9c1ec 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1916,6 +1916,8 @@ struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *hcd); + int (*start)(struct usb_hcd *hcd); ++ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); ++ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + }; + + #define XHCI_CFC_DELAY 10 +@@ -2070,6 +2072,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); + void xhci_shutdown(struct usb_hcd *hcd); + void xhci_init_driver(struct hc_driver *drv, + const struct xhci_driver_overrides *over); ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); + int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); + int xhci_ext_cap_init(struct xhci_hcd *xhci); + +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c +index ac9a81ae82164..e6fa137018082 100644 +--- a/drivers/usb/renesas_usbhs/fifo.c ++++ b/drivers/usb/renesas_usbhs/fifo.c +@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) + } + + usbhs_pipe_clear_without_sequence(pipe, 0, 0); ++ usbhs_pipe_running(pipe, 0); + + __usbhsf_pkt_del(pkt); + } +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index d0c05aa8a0d6e..bf11f86896837 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -64,6 +64,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { 
USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ ++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ +@@ -204,6 +205,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ ++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 3fe959104311b..2049e66f34a3f 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); + #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 + #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 + #define CINTERION_PRODUCT_CLS8 0x00b0 ++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 ++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 + + /* Olivetti products */ + #define OLIVETTI_VENDOR_ID 0x0b3c +@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), ++ .driver_info = RSVD(3)}, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), ++ .driver_info = RSVD(0)}, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = RSVD(4) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), +diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h +index 5c92a576edae8..08f742fd24099 100644 +--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h ++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h +@@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr { + struct sg_table sg_head; + int log_size; + int nsg; ++ int nent; + struct list_head list; + u64 offset; + }; +diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c +index 4b6195666c589..d300f799efcd1 100644 +--- a/drivers/vdpa/mlx5/core/mr.c ++++ b/drivers/vdpa/mlx5/core/mr.c +@@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift) + return (npages + 1) / 2; + } + +-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in) +-{ +- struct scatterlist *sg; +- __be64 *pas; +- int i; +- +- pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); +- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i) +- (*pas) = cpu_to_be64(sg_dma_address(sg)); +-} +- + static void mlx5_set_access_mode(void *mkc, int mode) + { + MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3); +@@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode) + static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt) + { + struct scatterlist *sg; ++ int nsg = mr->nsg; ++ u64 dma_addr; ++ u64 
dma_len; ++ int j = 0; + int i; + +- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i) +- mtt[i] = cpu_to_be64(sg_dma_address(sg)); ++ for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) { ++ for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg); ++ nsg && dma_len; ++ nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size)) ++ mtt[j++] = cpu_to_be64(dma_addr); ++ } + } + + static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) +@@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct + return -ENOMEM; + + MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid); +- fill_sg(mr, in); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO)); + MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO)); +@@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr + done: + mr->log_size = log_entity_size; + mr->nsg = nsg; +- err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); +- if (!err) ++ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); ++ if (!mr->nent) + goto err_map; + + err = create_direct_mr(mvdev, mr); +diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c +index 81b932f72e103..c6529f7c3034a 100644 +--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c ++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c +@@ -77,6 +77,7 @@ struct mlx5_vq_restore_info { + u64 device_addr; + u64 driver_addr; + u16 avail_index; ++ u16 used_index; + bool ready; + struct vdpa_callback cb; + bool restore; +@@ -111,6 +112,7 @@ struct mlx5_vdpa_virtqueue { + u32 virtq_id; + struct mlx5_vdpa_net *ndev; + u16 avail_idx; ++ u16 used_idx; + int fw_state; + + /* keep last in the struct */ +@@ -789,6 +791,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque + + obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context); + MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); ++ MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); + MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3, + get_features_12_3(ndev->mvdev.actual_features)); + vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context); +@@ -1007,6 +1010,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m + struct mlx5_virtq_attr { + u8 state; + u16 available_index; ++ u16 used_index; + }; + + static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, +@@ -1037,6 +1041,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu + memset(attr, 0, sizeof(*attr)); + attr->state = MLX5_GET(virtio_net_q_object, obj_context, state); + attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index); ++ attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index); + kfree(out); + return 0; + +@@ -1520,6 +1525,16 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) + } + } + ++static void clear_virtqueues(struct mlx5_vdpa_net *ndev) ++{ ++ int i; ++ ++ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { ++ ndev->vqs[i].avail_idx = 0; ++ ndev->vqs[i].used_idx = 0; ++ } ++} ++ + /* TODO: cross-endian support */ + static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) + { +@@ -1595,6 +1610,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct 
mlx5_vdpa_virtqu + return err; + + ri->avail_index = attr.available_index; ++ ri->used_index = attr.used_index; + ri->ready = mvq->ready; + ri->num_ent = mvq->num_ent; + ri->desc_addr = mvq->desc_addr; +@@ -1639,6 +1655,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev) + continue; + + mvq->avail_idx = ri->avail_index; ++ mvq->used_idx = ri->used_index; + mvq->ready = ri->ready; + mvq->num_ent = ri->num_ent; + mvq->desc_addr = ri->desc_addr; +@@ -1753,6 +1770,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) + if (!status) { + mlx5_vdpa_info(mvdev, "performing device reset\n"); + teardown_driver(ndev); ++ clear_virtqueues(ndev); + mlx5_vdpa_destroy_mr(&ndev->mvdev); + ndev->mvdev.status = 0; + ndev->mvdev.mlx_features = 0; +diff --git a/fs/afs/main.c b/fs/afs/main.c +index accdd8970e7c0..b2975256dadbd 100644 +--- a/fs/afs/main.c ++++ b/fs/afs/main.c +@@ -193,7 +193,7 @@ static int __init afs_init(void) + goto error_cache; + #endif + +- ret = register_pernet_subsys(&afs_net_ops); ++ ret = register_pernet_device(&afs_net_ops); + if (ret < 0) + goto error_net; + +@@ -213,7 +213,7 @@ static int __init afs_init(void) + error_proc: + afs_fs_exit(); + error_fs: +- unregister_pernet_subsys(&afs_net_ops); ++ unregister_pernet_device(&afs_net_ops); + error_net: + #ifdef CONFIG_AFS_FSCACHE + fscache_unregister_netfs(&afs_cache_netfs); +@@ -244,7 +244,7 @@ static void __exit afs_exit(void) + + proc_remove(afs_proc_symlink); + afs_fs_exit(); +- unregister_pernet_subsys(&afs_net_ops); ++ unregister_pernet_device(&afs_net_ops); + #ifdef CONFIG_AFS_FSCACHE + fscache_unregister_netfs(&afs_cache_netfs); + #endif +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 398c1eef71906..0d7238cb45b56 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -736,6 +736,7 @@ static int + cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + { + struct inode *inode; ++ int rc; + + if (flags & LOOKUP_RCU) + return -ECHILD; +@@ -745,8 +746,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) + CIFS_I(inode)->time = 0; /* force reval */ + +- if (cifs_revalidate_dentry(direntry)) +- return 0; ++ rc = cifs_revalidate_dentry(direntry); ++ if (rc) { ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); ++ switch (rc) { ++ case -ENOENT: ++ case -ESTALE: ++ /* ++ * Those errors mean the dentry is invalid ++ * (file was deleted or recreated) ++ */ ++ return 0; ++ default: ++ /* ++ * Otherwise some unexpected error happened ++ * report it as-is to VFS layer ++ */ ++ return rc; ++ } ++ } + else { + /* + * If the inode wasn't known to be a dfs entry when +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index 204a622b89ed3..56ec9fba3925b 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -286,7 +286,7 @@ struct smb2_negotiate_req { + __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */ + __le16 NegotiateContextCount; /* SMB3.1.1 only. 
MBZ earlier */ + __le16 Reserved2; +- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */ ++ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */ + } __packed; + + /* Dialects */ +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index b1c2f416b9bd9..9391cd17a2b55 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -655,10 +655,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num, + spin_lock(&server->req_lock); + if (*credits < num) { + /* +- * Return immediately if not too many requests in flight since +- * we will likely be stuck on waiting for credits. ++ * If the server is tight on resources or just gives us less ++ * credits for other reasons (e.g. requests are coming out of ++ * order and the server delays granting more credits until it ++ * processes a missing mid) and we exhausted most available ++ * credits there may be situations when we try to send ++ * a compound request but we don't have enough credits. At this ++ * point the client needs to decide if it should wait for ++ * additional credits or fail the request. If at least one ++ * request is in flight there is a high probability that the ++ * server will return enough credits to satisfy this compound ++ * request. ++ * ++ * Return immediately if no requests in flight since we will be ++ * stuck on waiting for credits. + */ +- if (server->in_flight < num - *credits) { ++ if (server->in_flight == 0) { + spin_unlock(&server->req_lock); + return -ENOTSUPP; + } +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index b5c109703daaf..21c20fd5f9ee7 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -735,9 +735,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + ++ set_page_huge_active(page); + /* + * unlock_page because locked by add_to_page_cache() +- * page_put due to reference from alloc_huge_page() ++ * put_page() due to reference from alloc_huge_page() + */ + unlock_page(page); + put_page(page); +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 907ecaffc3386..3b6307f6bd93d 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -8782,12 +8782,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, + + if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { + atomic_dec(&task->io_uring->in_idle); +- /* +- * If the files that are going away are the ones in the thread +- * identity, clear them out. 
+- */
+- if (task->io_uring->identity->files == files)
+- task->io_uring->identity->files = NULL;
+ io_sq_thread_unpark(ctx->sq_data);
+ }
+ }
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 28a075b5f5b2e..d1efa3a5a5032 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -992,8 +992,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
+
+ buflen -= thislen;
+ memcpy(&buf[buflen], name, thislen);
+- tmp = dget_dlock(d->d_parent);
+ spin_unlock(&d->d_lock);
++ tmp = dget_parent(d);
+
+ dput(d);
+ d = tmp;
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index a1f72ac053e5f..5c5c3972ebd0a 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -445,8 +445,9 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ const struct cred *old_cred;
+ int ret;
+
+- if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
+- return 0;
++ ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
++ if (ret <= 0)
++ return ret;
+
+ ret = ovl_real_fdget_meta(file, &real, !datasync);
+ if (ret)
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index f8880aa2ba0ec..9f7af98ae2005 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -322,6 +322,7 @@ int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry);
+ bool ovl_is_metacopy_dentry(struct dentry *dentry);
+ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+ int padding);
++int ovl_sync_status(struct ovl_fs *ofs);
+
+ static inline bool ovl_is_impuredir(struct super_block *sb,
+ struct dentry *dentry)
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
+index 1b5a2094df8eb..b208eba5d0b64 100644
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -79,6 +79,8 @@ struct ovl_fs {
+ atomic_long_t last_ino;
+ /* Whiteout dentry cache */
+ struct dentry *whiteout;
++ /* r/o snapshot of upperdir sb's only taken on volatile mounts */
++ errseq_t errseq;
+ };
+
+ static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index 01620ebae1bd4..f404a78e6b607 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -865,7 +865,7 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
+
+ struct ovl_dir_file *od = file->private_data;
+ struct dentry *dentry = file->f_path.dentry;
+- struct file *realfile = od->realfile;
++ struct file *old, *realfile = od->realfile;
+
+ if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
+ return want_upper ?
NULL : realfile; +@@ -874,29 +874,20 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper) + * Need to check if we started out being a lower dir, but got copied up + */ + if (!od->is_upper) { +- struct inode *inode = file_inode(file); +- + realfile = READ_ONCE(od->upperfile); + if (!realfile) { + struct path upperpath; + + ovl_path_upper(dentry, &upperpath); + realfile = ovl_dir_open_realfile(file, &upperpath); ++ if (IS_ERR(realfile)) ++ return realfile; + +- inode_lock(inode); +- if (!od->upperfile) { +- if (IS_ERR(realfile)) { +- inode_unlock(inode); +- return realfile; +- } +- smp_store_release(&od->upperfile, realfile); +- } else { +- /* somebody has beaten us to it */ +- if (!IS_ERR(realfile)) +- fput(realfile); +- realfile = od->upperfile; ++ old = cmpxchg_release(&od->upperfile, NULL, realfile); ++ if (old) { ++ fput(realfile); ++ realfile = old; + } +- inode_unlock(inode); + } + } + +@@ -909,8 +900,9 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, + struct file *realfile; + int err; + +- if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb))) +- return 0; ++ err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb)); ++ if (err <= 0) ++ return err; + + realfile = ovl_dir_real_file(file, true); + err = PTR_ERR_OR_ZERO(realfile); +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c +index 290983bcfbb35..d23177a53c95f 100644 +--- a/fs/overlayfs/super.c ++++ b/fs/overlayfs/super.c +@@ -261,11 +261,20 @@ static int ovl_sync_fs(struct super_block *sb, int wait) + struct super_block *upper_sb; + int ret; + +- if (!ovl_upper_mnt(ofs)) +- return 0; ++ ret = ovl_sync_status(ofs); ++ /* ++ * We have to always set the err, because the return value isn't ++ * checked in syncfs, and instead indirectly return an error via ++ * the sb's writeback errseq, which VFS inspects after this call. ++ */ ++ if (ret < 0) { ++ errseq_set(&sb->s_wb_err, -EIO); ++ return -EIO; ++ } ++ ++ if (!ret) ++ return ret; + +- if (!ovl_should_sync(ofs)) +- return 0; + /* + * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC). + * All the super blocks will be iterated, including upper_sb. +@@ -1927,6 +1936,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) + sb->s_op = &ovl_super_operations; + + if (ofs->config.upperdir) { ++ struct super_block *upper_sb; ++ + if (!ofs->config.workdir) { + pr_err("missing 'workdir'\n"); + goto out_err; +@@ -1936,6 +1947,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) + if (err) + goto out_err; + ++ upper_sb = ovl_upper_mnt(ofs)->mnt_sb; ++ if (!ovl_should_sync(ofs)) { ++ ofs->errseq = errseq_sample(&upper_sb->s_wb_err); ++ if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) { ++ err = -EIO; ++ pr_err("Cannot mount volatile when upperdir has an unseen error. 
Sync upperdir fs to clear state.\n"); ++ goto out_err; ++ } ++ } ++ + err = ovl_get_workdir(sb, ofs, &upperpath); + if (err) + goto out_err; +@@ -1943,9 +1964,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) + if (!ofs->workdir) + sb->s_flags |= SB_RDONLY; + +- sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth; +- sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran; +- ++ sb->s_stack_depth = upper_sb->s_stack_depth; ++ sb->s_time_gran = upper_sb->s_time_gran; + } + oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers); + err = PTR_ERR(oe); +diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c +index 23f475627d07f..6e7b8c882045c 100644 +--- a/fs/overlayfs/util.c ++++ b/fs/overlayfs/util.c +@@ -950,3 +950,30 @@ err_free: + kfree(buf); + return ERR_PTR(res); + } ++ ++/* ++ * ovl_sync_status() - Check fs sync status for volatile mounts ++ * ++ * Returns 1 if this is not a volatile mount and a real sync is required. ++ * ++ * Returns 0 if syncing can be skipped because mount is volatile, and no errors ++ * have occurred on the upperdir since the mount. ++ * ++ * Returns -errno if it is a volatile mount, and the error that occurred since ++ * the last mount. If the error code changes, it'll return the latest error ++ * code. ++ */ ++ ++int ovl_sync_status(struct ovl_fs *ofs) ++{ ++ struct vfsmount *mnt; ++ ++ if (ovl_should_sync(ofs)) ++ return 1; ++ ++ mnt = ovl_upper_mnt(ofs); ++ if (!mnt) ++ return 0; ++ ++ return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq); ++} +diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h +index f5e92fe9151c3..bd1c39907b924 100644 +--- a/include/drm/drm_dp_mst_helper.h ++++ b/include/drm/drm_dp_mst_helper.h +@@ -783,6 +783,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector, + + struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + ++int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count); + + int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); + +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index ebca2ef022127..b5807f23caf80 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -770,6 +770,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, + } + #endif + ++void set_page_huge_active(struct page *page); ++ + #else /* CONFIG_HUGETLB_PAGE */ + struct hstate {}; + +diff --git a/include/linux/iommu.h b/include/linux/iommu.h +index b95a6f8db6ff9..9bbcfe3b0bb12 100644 +--- a/include/linux/iommu.h ++++ b/include/linux/iommu.h +@@ -614,7 +614,10 @@ static inline void dev_iommu_fwspec_set(struct device *dev, + + static inline void *dev_iommu_priv_get(struct device *dev) + { +- return dev->iommu->priv; ++ if (dev->iommu) ++ return dev->iommu->priv; ++ else ++ return NULL; + } + + static inline void dev_iommu_priv_set(struct device *dev, void *priv) +diff --git a/include/linux/irq.h b/include/linux/irq.h +index c54365309e975..a36d35c259963 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -922,7 +922,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, + __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) + + #define irq_alloc_desc(node) \ +- irq_alloc_descs(-1, 0, 1, node) ++ irq_alloc_descs(-1, 1, 1, node) + + #define irq_alloc_desc_at(at, node) \ + irq_alloc_descs(at, at, 1, node) +@@ -937,7 +937,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int 
from, + __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) + + #define devm_irq_alloc_desc(dev, node) \ +- devm_irq_alloc_descs(dev, -1, 0, 1, node) ++ devm_irq_alloc_descs(dev, -1, 1, 1, node) + + #define devm_irq_alloc_desc_at(dev, at, node) \ + devm_irq_alloc_descs(dev, at, at, 1, node) +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index 629abaf25681d..21f21f7f878ce 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -251,7 +251,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); + extern bool arch_within_kprobe_blacklist(unsigned long addr); + extern int arch_populate_kprobe_blacklist(void); + extern bool arch_kprobe_on_func_entry(unsigned long offset); +-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); ++extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); + + extern bool within_kprobe_blacklist(unsigned long addr); + extern int kprobe_add_ksym_blacklist(unsigned long entry); +diff --git a/include/linux/msi.h b/include/linux/msi.h +index 6b584cc4757cd..2a3e997751cea 100644 +--- a/include/linux/msi.h ++++ b/include/linux/msi.h +@@ -139,6 +139,12 @@ struct msi_desc { + list_for_each_entry((desc), dev_to_msi_list((dev)), list) + #define for_each_msi_entry_safe(desc, tmp, dev) \ + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) ++#define for_each_msi_vector(desc, __irq, dev) \ ++ for_each_msi_entry((desc), (dev)) \ ++ if ((desc)->irq) \ ++ for (__irq = (desc)->irq; \ ++ __irq < ((desc)->irq + (desc)->nvec_used); \ ++ __irq++) + + #ifdef CONFIG_IRQ_MSI_IOMMU + static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index 0f21617f1a668..966ed89803274 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -307,11 +307,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) + \ + it_func_ptr = \ + rcu_dereference_raw((&__tracepoint_##_name)->funcs); \ +- do { \ +- it_func = (it_func_ptr)->func; \ +- __data = (it_func_ptr)->data; \ +- ((void(*)(void *, proto))(it_func))(__data, args); \ +- } while ((++it_func_ptr)->func); \ ++ if (it_func_ptr) { \ ++ do { \ ++ it_func = (it_func_ptr)->func; \ ++ __data = (it_func_ptr)->data; \ ++ ((void(*)(void *, proto))(it_func))(__data, args); \ ++ } while ((++it_func_ptr)->func); \ ++ } \ + return 0; \ + } \ + DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); +diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h +index 938eaf9517e26..76dad53a410ac 100644 +--- a/include/linux/vmalloc.h ++++ b/include/linux/vmalloc.h +@@ -24,7 +24,8 @@ struct notifier_block; /* in notifier.h */ + #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ + #define VM_NO_GUARD 0x00000040 /* don't add guard page */ + #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ +-#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */ ++#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */ ++#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */ + + /* + * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. +@@ -37,12 +38,6 @@ struct notifier_block; /* in notifier.h */ + * determine which allocations need the module shadow freed. 
+ */ + +-/* +- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with +- * vfree_atomic(). +- */ +-#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */ +- + /* bits [20..32] reserved for arch specific ioremap internals */ + + /* +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index d8fd8676fc724..3648164faa060 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -1155,7 +1155,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + old = *pold; + *pold = new; + if (old != NULL) +- qdisc_tree_flush_backlog(old); ++ qdisc_purge_queue(old); + sch_tree_unlock(sch); + + return old; +diff --git a/include/net/udp.h b/include/net/udp.h +index 295d52a735982..949ae14a54250 100644 +--- a/include/net/udp.h ++++ b/include/net/udp.h +@@ -178,7 +178,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, + int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); + + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, +- netdev_features_t features); ++ netdev_features_t features, bool is_ipv6); + + static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) + { +diff --git a/init/init_task.c b/init/init_task.c +index 15f6eb93a04fa..16d14c2ebb552 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -198,7 +198,8 @@ struct task_struct init_task + .lockdep_recursion = 0, + #endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER +- .ret_stack = NULL, ++ .ret_stack = NULL, ++ .tracing_graph_pause = ATOMIC_INIT(0), + #endif + #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION) + .trace_recursion = 0, +diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c +index dbc1dbdd2cbf0..c2a501cd90eba 100644 +--- a/kernel/bpf/bpf_inode_storage.c ++++ b/kernel/bpf/bpf_inode_storage.c +@@ -125,8 +125,12 @@ static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, + + fd = *(int *)key; + f = fget_raw(fd); +- if (!f || !inode_storage_ptr(f->f_inode)) ++ if (!f) ++ return -EBADF; ++ if (!inode_storage_ptr(f->f_inode)) { ++ fput(f); + return -EBADF; ++ } + + sdata = bpf_local_storage_update(f->f_inode, + (struct bpf_local_storage_map *)map, +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c +index 96555a8a2c545..6aa9e10c6335a 100644 +--- a/kernel/bpf/cgroup.c ++++ b/kernel/bpf/cgroup.c +@@ -1442,6 +1442,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + goto out; + } + ++ if (ctx.optlen < 0) { ++ ret = -EFAULT; ++ goto out; ++ } ++ + if (copy_from_user(ctx.optval, optval, + min(ctx.optlen, max_optlen)) != 0) { + ret = -EFAULT; +@@ -1459,7 +1464,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + goto out; + } + +- if (ctx.optlen > max_optlen) { ++ if (ctx.optlen > max_optlen || ctx.optlen < 0) { + ret = -EFAULT; + goto out; + } +diff --git a/kernel/bpf/preload/Makefile b/kernel/bpf/preload/Makefile +index 23ee310b6eb49..1951332dd15f5 100644 +--- a/kernel/bpf/preload/Makefile ++++ b/kernel/bpf/preload/Makefile +@@ -4,8 +4,11 @@ LIBBPF_SRCS = $(srctree)/tools/lib/bpf/ + LIBBPF_A = $(obj)/libbpf.a + LIBBPF_OUT = $(abspath $(obj)) + ++# Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test ++# in tools/scripts/Makefile.include always succeeds when building the kernel ++# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg". 
+ $(LIBBPF_A): +- $(Q)$(MAKE) -C $(LIBBPF_SRCS) OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a ++ $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a + + userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \ + -I $(srctree)/tools/lib/ -Wno-unused-result +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c +index 2c0c4d6d0f83a..d924676c8781b 100644 +--- a/kernel/irq/msi.c ++++ b/kernel/irq/msi.c +@@ -436,22 +436,22 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + + can_reserve = msi_check_reservation_mode(domain, info, dev); + +- for_each_msi_entry(desc, dev) { +- virq = desc->irq; +- if (desc->nvec_used == 1) +- dev_dbg(dev, "irq %d for MSI\n", virq); +- else ++ /* ++ * This flag is set by the PCI layer as we need to activate ++ * the MSI entries before the PCI layer enables MSI in the ++ * card. Otherwise the card latches a random msi message. ++ */ ++ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) ++ goto skip_activate; ++ ++ for_each_msi_vector(desc, i, dev) { ++ if (desc->irq == i) { ++ virq = desc->irq; + dev_dbg(dev, "irq [%d-%d] for MSI\n", + virq, virq + desc->nvec_used - 1); +- /* +- * This flag is set by the PCI layer as we need to activate +- * the MSI entries before the PCI layer enables MSI in the +- * card. Otherwise the card latches a random msi message. +- */ +- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) +- continue; ++ } + +- irq_data = irq_domain_get_irq_data(domain, desc->irq); ++ irq_data = irq_domain_get_irq_data(domain, i); + if (!can_reserve) { + irqd_clr_can_reserve(irq_data); + if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) +@@ -462,28 +462,24 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + goto cleanup; + } + ++skip_activate: + /* + * If these interrupts use reservation mode, clear the activated bit + * so request_irq() will assign the final vector. + */ + if (can_reserve) { +- for_each_msi_entry(desc, dev) { +- irq_data = irq_domain_get_irq_data(domain, desc->irq); ++ for_each_msi_vector(desc, i, dev) { ++ irq_data = irq_domain_get_irq_data(domain, i); + irqd_clr_activated(irq_data); + } + } + return 0; + + cleanup: +- for_each_msi_entry(desc, dev) { +- struct irq_data *irqd; +- +- if (desc->irq == virq) +- break; +- +- irqd = irq_domain_get_irq_data(domain, desc->irq); +- if (irqd_is_activated(irqd)) +- irq_domain_deactivate_irq(irqd); ++ for_each_msi_vector(desc, i, dev) { ++ irq_data = irq_domain_get_irq_data(domain, i); ++ if (irqd_is_activated(irq_data)) ++ irq_domain_deactivate_irq(irq_data); + } + msi_domain_free_irqs(domain, dev); + return ret; +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 41fdbb7953c60..911c77ef5bbcd 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -2082,28 +2082,48 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset) + return !offset; + } + +-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) ++/** ++ * kprobe_on_func_entry() -- check whether given address is function entry ++ * @addr: Target address ++ * @sym: Target symbol name ++ * @offset: The offset from the symbol or the address ++ * ++ * This checks whether the given @addr+@offset or @sym+@offset is on the ++ * function entry address or not. ++ * This returns 0 if it is the function entry, or -EINVAL if it is not. ++ * And also it returns -ENOENT if it fails the symbol or address lookup. ++ * Caller must pass @addr or @sym (either one must be NULL), or this ++ * returns -EINVAL. 
++ */ ++int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) + { + kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); + + if (IS_ERR(kp_addr)) +- return false; ++ return PTR_ERR(kp_addr); + +- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) || +- !arch_kprobe_on_func_entry(offset)) +- return false; ++ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset)) ++ return -ENOENT; + +- return true; ++ if (!arch_kprobe_on_func_entry(offset)) ++ return -EINVAL; ++ ++ return 0; + } + + int register_kretprobe(struct kretprobe *rp) + { +- int ret = 0; ++ int ret; + struct kretprobe_instance *inst; + int i; + void *addr; + +- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) ++ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); ++ if (ret) ++ return ret; ++ ++ /* If only rp->kp.addr is specified, check reregistering kprobes */ ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) + return -EINVAL; + + if (kretprobe_blacklist_size) { +diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c +index 5658f13037b3d..a58da91eadb5c 100644 +--- a/kernel/trace/fgraph.c ++++ b/kernel/trace/fgraph.c +@@ -395,7 +395,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) + } + + if (t->ret_stack == NULL) { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->curr_ret_stack = -1; + t->curr_ret_depth = -1; +@@ -490,7 +489,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + static void + graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) + { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visible before we add the ret_stack */ +diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c +index 10bbb0f381d56..ee4571b624bcb 100644 +--- a/kernel/trace/trace_irqsoff.c ++++ b/kernel/trace/trace_irqsoff.c +@@ -562,6 +562,8 @@ static int __irqsoff_tracer_init(struct trace_array *tr) + /* non overwrite screws up the latency tracers */ + set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); + set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1); ++ /* without pause, we will produce garbage if another latency occurs */ ++ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1); + + tr->max_latency = 0; + irqsoff_trace = tr; +@@ -583,11 +585,13 @@ static void __irqsoff_tracer_reset(struct trace_array *tr) + { + int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; + int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; ++ int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE; + + stop_irqsoff_tracer(tr, is_graph(tr)); + + set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); + set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); ++ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag); + ftrace_reset_array_ops(tr); + + irqsoff_busy = false; +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 5fff39541b8ae..68150b9cbde92 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -221,9 +221,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call) + { + struct trace_kprobe *tk = trace_kprobe_primary_from_call(call); + +- return tk ? kprobe_on_func_entry(tk->rp.kp.addr, ++ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr, + tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name, +- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false; ++ tk->rp.kp.addr ? 
0 : tk->rp.kp.offset) == 0) : false; + } + + bool trace_kprobe_error_injectable(struct trace_event_call *call) +@@ -828,9 +828,11 @@ static int trace_kprobe_create(int argc, const char *argv[]) + } + if (is_return) + flags |= TPARG_FL_RETURN; +- if (kprobe_on_func_entry(NULL, symbol, offset)) ++ ret = kprobe_on_func_entry(NULL, symbol, offset); ++ if (ret == 0) + flags |= TPARG_FL_FENTRY; +- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) { ++ /* Defer the ENOENT case until register kprobe */ ++ if (ret == -EINVAL && is_return) { + trace_probe_log_err(0, BAD_RETPROBE); + goto parse_error; + } +diff --git a/mm/compaction.c b/mm/compaction.c +index 13cb7a961b319..0846d4ffa3387 100644 +--- a/mm/compaction.c ++++ b/mm/compaction.c +@@ -1302,7 +1302,7 @@ fast_isolate_freepages(struct compact_control *cc) + { + unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); + unsigned int nr_scanned = 0; +- unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; ++ unsigned long low_pfn, min_pfn, highest = 0; + unsigned long nr_isolated = 0; + unsigned long distance; + struct page *page = NULL; +@@ -1347,6 +1347,7 @@ fast_isolate_freepages(struct compact_control *cc) + struct page *freepage; + unsigned long flags; + unsigned int order_scanned = 0; ++ unsigned long high_pfn = 0; + + if (!area->nr_free) + continue; +diff --git a/mm/filemap.c b/mm/filemap.c +index 0b2067b3c3283..125b69f59caad 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -835,6 +835,7 @@ noinline int __add_to_page_cache_locked(struct page *page, + XA_STATE(xas, &mapping->i_pages, offset); + int huge = PageHuge(page); + int error; ++ bool charged = false; + + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageSwapBacked(page), page); +@@ -848,6 +849,7 @@ noinline int __add_to_page_cache_locked(struct page *page, + error = mem_cgroup_charge(page, current->mm, gfp); + if (error) + goto error; ++ charged = true; + } + + gfp &= GFP_RECLAIM_MASK; +@@ -896,6 +898,8 @@ unlock: + + if (xas_error(&xas)) { + error = xas_error(&xas); ++ if (charged) ++ mem_cgroup_uncharge(page); + goto error; + } + +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 85eda66eb625d..4a78514830d5a 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -2188,7 +2188,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + { + spinlock_t *ptl; + struct mmu_notifier_range range; +- bool was_locked = false; ++ bool do_unlock_page = false; + pmd_t _pmd; + + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, +@@ -2204,7 +2204,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + VM_BUG_ON(freeze && !page); + if (page) { + VM_WARN_ON_ONCE(!PageLocked(page)); +- was_locked = true; + if (page != pmd_page(*pmd)) + goto out; + } +@@ -2213,19 +2212,29 @@ repeat: + if (pmd_trans_huge(*pmd)) { + if (!page) { + page = pmd_page(*pmd); +- if (unlikely(!trylock_page(page))) { +- get_page(page); +- _pmd = *pmd; +- spin_unlock(ptl); +- lock_page(page); +- spin_lock(ptl); +- if (unlikely(!pmd_same(*pmd, _pmd))) { +- unlock_page(page); ++ /* ++ * An anonymous page must be locked, to ensure that a ++ * concurrent reuse_swap_page() sees stable mapcount; ++ * but reuse_swap_page() is not used on shmem or file, ++ * and page lock must not be taken when zap_pmd_range() ++ * calls __split_huge_pmd() while i_mmap_lock is held. 
++ */ ++ if (PageAnon(page)) { ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } + put_page(page); +- page = NULL; +- goto repeat; + } +- put_page(page); ++ do_unlock_page = true; + } + } + if (PageMlocked(page)) +@@ -2235,7 +2244,7 @@ repeat: + __split_huge_pmd_locked(vma, pmd, range.start, freeze); + out: + spin_unlock(ptl); +- if (!was_locked && page) ++ if (do_unlock_page) + unlock_page(page); + /* + * No need to double call mmu_notifier->invalidate_range() callback. +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 9a3f06cdcc2a8..26909396898b6 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -79,6 +79,21 @@ DEFINE_SPINLOCK(hugetlb_lock); + static int num_fault_mutexes; + struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; + ++static inline bool PageHugeFreed(struct page *head) ++{ ++ return page_private(head + 4) == -1UL; ++} ++ ++static inline void SetPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, -1UL); ++} ++ ++static inline void ClearPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, 0); ++} ++ + /* Forward declaration */ + static int hugetlb_acct_memory(struct hstate *h, long delta); + +@@ -1028,6 +1043,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) + list_move(&page->lru, &h->hugepage_freelists[nid]); + h->free_huge_pages++; + h->free_huge_pages_node[nid]++; ++ SetPageHugeFreed(page); + } + + static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) +@@ -1044,6 +1060,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) + + list_move(&page->lru, &h->hugepage_activelist); + set_page_refcounted(page); ++ ClearPageHugeFreed(page); + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; + return page; +@@ -1344,12 +1361,11 @@ struct hstate *size_to_hstate(unsigned long size) + */ + bool page_huge_active(struct page *page) + { +- VM_BUG_ON_PAGE(!PageHuge(page), page); +- return PageHead(page) && PagePrivate(&page[1]); ++ return PageHeadHuge(page) && PagePrivate(&page[1]); + } + + /* never called for tail page */ +-static void set_page_huge_active(struct page *page) ++void set_page_huge_active(struct page *page) + { + VM_BUG_ON_PAGE(!PageHeadHuge(page), page); + SetPagePrivate(&page[1]); +@@ -1505,6 +1521,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) + spin_lock(&hugetlb_lock); + h->nr_huge_pages++; + h->nr_huge_pages_node[nid]++; ++ ClearPageHugeFreed(page); + spin_unlock(&hugetlb_lock); + } + +@@ -1755,6 +1772,7 @@ int dissolve_free_huge_page(struct page *page) + { + int rc = -EBUSY; + ++retry: + /* Not to disrupt normal path by vainly holding hugetlb_lock */ + if (!PageHuge(page)) + return 0; +@@ -1771,6 +1789,26 @@ int dissolve_free_huge_page(struct page *page) + int nid = page_to_nid(head); + if (h->free_huge_pages - h->resv_huge_pages == 0) + goto out; ++ ++ /* ++ * We should make sure that the page is already on the free list ++ * when it is dissolved. ++ */ ++ if (unlikely(!PageHugeFreed(head))) { ++ spin_unlock(&hugetlb_lock); ++ cond_resched(); ++ ++ /* ++ * Theoretically, we should return -EBUSY when we ++ * encounter this race. In fact, we have a chance ++ * to successfully dissolve the page if we do a ++ * retry. Because the race window is quite small. 
++ * If we seize this opportunity, it is an optimization ++ * for increasing the success rate of dissolving page. ++ */ ++ goto retry; ++ } ++ + /* + * Move PageHWPoison flag from head page to the raw error page, + * which makes any subpages rather than the error page reusable. +@@ -5556,9 +5594,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) + { + bool ret = true; + +- VM_BUG_ON_PAGE(!PageHead(page), page); + spin_lock(&hugetlb_lock); +- if (!page_huge_active(page) || !get_page_unless_zero(page)) { ++ if (!PageHeadHuge(page) || !page_huge_active(page) || ++ !get_page_unless_zero(page)) { + ret = false; + goto unlock; + } +diff --git a/mm/memblock.c b/mm/memblock.c +index b68ee86788af9..10bd7d1ef0f49 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -275,14 +275,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, + * + * Find @size free area aligned to @align in the specified range and node. + * +- * When allocation direction is bottom-up, the @start should be greater +- * than the end of the kernel image. Otherwise, it will be trimmed. The +- * reason is that we want the bottom-up allocation just near the kernel +- * image so it is highly likely that the allocated memory and the kernel +- * will reside in the same node. +- * +- * If bottom-up allocation failed, will try to allocate memory top-down. +- * + * Return: + * Found address on success, 0 on failure. + */ +@@ -291,8 +283,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + phys_addr_t end, int nid, + enum memblock_flags flags) + { +- phys_addr_t kernel_end, ret; +- + /* pump up @end */ + if (end == MEMBLOCK_ALLOC_ACCESSIBLE || + end == MEMBLOCK_ALLOC_KASAN) +@@ -301,40 +291,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + /* avoid allocating the first page */ + start = max_t(phys_addr_t, start, PAGE_SIZE); + end = max(start, end); +- kernel_end = __pa_symbol(_end); +- +- /* +- * try bottom-up allocation only when bottom-up mode +- * is set and @end is above the kernel image. +- */ +- if (memblock_bottom_up() && end > kernel_end) { +- phys_addr_t bottom_up_start; +- +- /* make sure we will allocate above the kernel */ +- bottom_up_start = max(start, kernel_end); + +- /* ok, try bottom-up allocation first */ +- ret = __memblock_find_range_bottom_up(bottom_up_start, end, +- size, align, nid, flags); +- if (ret) +- return ret; +- +- /* +- * we always limit bottom-up allocation above the kernel, +- * but top-down allocation doesn't have the limit, so +- * retrying top-down allocation may succeed when bottom-up +- * allocation failed. +- * +- * bottom-up allocation is expected to be fail very rarely, +- * so we use WARN_ONCE() here to see the stack trace if +- * fail happens. 
+- */ +- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), +- "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); +- } +- +- return __memblock_find_range_top_down(start, end, size, align, nid, +- flags); ++ if (memblock_bottom_up()) ++ return __memblock_find_range_bottom_up(start, end, size, align, ++ nid, flags); ++ else ++ return __memblock_find_range_top_down(start, end, size, align, ++ nid, flags); + } + + /** +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 9500d28a43b0e..2fe4bbb6b80cf 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1245,13 +1245,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, + old = neigh->nud_state; + err = -EPERM; + +- if (!(flags & NEIGH_UPDATE_F_ADMIN) && +- (old & (NUD_NOARP | NUD_PERMANENT))) +- goto out; + if (neigh->dead) { + NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); ++ new = old; + goto out; + } ++ if (!(flags & NEIGH_UPDATE_F_ADMIN) && ++ (old & (NUD_NOARP | NUD_PERMANENT))) ++ goto out; + + ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify); + +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index 64594aa755f05..76a420c76f16e 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) + } + + dev->needed_headroom = t_hlen + hlen; +- mtu -= (dev->hard_header_len + t_hlen); ++ mtu -= t_hlen; + + if (mtu < IPV4_MIN_MTU) + mtu = IPV4_MIN_MTU; +@@ -347,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, + nt = netdev_priv(dev); + t_hlen = nt->hlen + sizeof(struct iphdr); + dev->min_mtu = ETH_MIN_MTU; +- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; ++ dev->max_mtu = IP_MAX_MTU - t_hlen; + ip_tunnel_add(itn, nt); + return nt; + +@@ -488,11 +488,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, + int mtu; + + tunnel_hlen = md ? tunnel_hlen : tunnel->hlen; +- pkt_size = skb->len - tunnel_hlen - dev->hard_header_len; ++ pkt_size = skb->len - tunnel_hlen; + + if (df) +- mtu = dst_mtu(&rt->dst) - dev->hard_header_len +- - sizeof(struct iphdr) - tunnel_hlen; ++ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen); + else + mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; + +@@ -972,7 +971,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) + { + struct ip_tunnel *tunnel = netdev_priv(dev); + int t_hlen = tunnel->hlen + sizeof(struct iphdr); +- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen; ++ int max_mtu = IP_MAX_MTU - t_hlen; + + if (new_mtu < ETH_MIN_MTU) + return -EINVAL; +@@ -1149,10 +1148,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], + + mtu = ip_tunnel_bind_dev(dev); + if (tb[IFLA_MTU]) { +- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen; ++ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr)); + +- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, +- (unsigned int)(max - sizeof(struct iphdr))); ++ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max); + } + + err = dev_set_mtu(dev, mtu); +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index c62805cd31319..cfdaac4a57e41 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -184,8 +184,67 @@ out_unlock: + } + EXPORT_SYMBOL(skb_udp_tunnel_segment); + ++static void __udpv4_gso_segment_csum(struct sk_buff *seg, ++ __be32 *oldip, __be32 *newip, ++ __be16 *oldport, __be16 *newport) ++{ ++ struct udphdr *uh; ++ struct iphdr *iph; ++ ++ if (*oldip == *newip && *oldport == *newport) ++ return; ++ ++ uh = udp_hdr(seg); ++ iph = ip_hdr(seg); ++ ++ if (uh->check) { ++ inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip, ++ true); ++ inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport, ++ false); ++ if (!uh->check) ++ uh->check = CSUM_MANGLED_0; ++ } ++ *oldport = *newport; ++ ++ csum_replace4(&iph->check, *oldip, *newip); ++ *oldip = *newip; ++} ++ ++static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs) ++{ ++ struct sk_buff *seg; ++ struct udphdr *uh, *uh2; ++ struct iphdr *iph, *iph2; ++ ++ seg = segs; ++ uh = udp_hdr(seg); ++ iph = ip_hdr(seg); ++ ++ if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) && ++ (udp_hdr(seg)->source == udp_hdr(seg->next)->source) && ++ (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) && ++ (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr)) ++ return segs; ++ ++ while ((seg = seg->next)) { ++ uh2 = udp_hdr(seg); ++ iph2 = ip_hdr(seg); ++ ++ __udpv4_gso_segment_csum(seg, ++ &iph2->saddr, &iph->saddr, ++ &uh2->source, &uh->source); ++ __udpv4_gso_segment_csum(seg, ++ &iph2->daddr, &iph->daddr, ++ &uh2->dest, &uh->dest); ++ } ++ ++ return segs; ++} ++ + static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, +- netdev_features_t features) ++ netdev_features_t features, ++ bool is_ipv6) + { + unsigned int mss = skb_shinfo(skb)->gso_size; + +@@ -195,11 +254,11 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, + + udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss); + +- return skb; ++ return is_ipv6 ? 
skb : __udpv4_gso_segment_list_csum(skb); + } + + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, +- netdev_features_t features) ++ netdev_features_t features, bool is_ipv6) + { + struct sock *sk = gso_skb->sk; + unsigned int sum_truesize = 0; +@@ -211,7 +270,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, + __be16 newlen; + + if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) +- return __udp_gso_segment_list(gso_skb, features); ++ return __udp_gso_segment_list(gso_skb, features, is_ipv6); + + mss = skb_shinfo(gso_skb)->gso_size; + if (gso_skb->len <= sizeof(*uh) + mss) +@@ -325,7 +384,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, + goto out; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) +- return __udp_gso_segment(skb, features); ++ return __udp_gso_segment(skb, features, false); + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) +diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c +index f9e888d1b9af8..ebee748f25b9e 100644 +--- a/net/ipv6/udp_offload.c ++++ b/net/ipv6/udp_offload.c +@@ -46,7 +46,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + goto out; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) +- return __udp_gso_segment(skb, features); ++ return __udp_gso_segment(skb, features, true); + + /* Do software UFO. Complete and fill in the UDP checksum as HW cannot + * do checksum of UDP packets sent as multiple IP fragments. +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c +index 7a4d0715d1c32..a966d29c772d9 100644 +--- a/net/lapb/lapb_out.c ++++ b/net/lapb/lapb_out.c +@@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb) + skb = skb_dequeue(&lapb->write_queue); + + do { +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { ++ skbn = skb_copy(skb, GFP_ATOMIC); ++ if (!skbn) { + skb_queue_head(&lapb->write_queue, skb); + break; + } +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c +index c9a8a2433e8ac..48322e45e7ddb 100644 +--- a/net/mac80211/driver-ops.c ++++ b/net/mac80211/driver-ops.c +@@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local, + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = drv_sta_add(local, sdata, &sta->sta); +- if (ret == 0) ++ if (ret == 0) { + sta->uploaded = true; ++ if (rcu_access_pointer(sta->sta.rates)) ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta); ++ } + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + drv_sta_remove(local, sdata, &sta->sta); +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index 45927202c71c6..63652c39c8e07 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -960,7 +960,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, + if (old) + kfree_rcu(old, rcu_head); + +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); ++ if (sta->uploaded) ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + + ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta)); + +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c +index 0a2f4817ec6cf..41671af6b33f9 100644 +--- a/net/rxrpc/af_rxrpc.c ++++ b/net/rxrpc/af_rxrpc.c +@@ -990,7 +990,7 @@ static int __init af_rxrpc_init(void) + goto error_security; + } + +- ret = register_pernet_subsys(&rxrpc_net_ops); ++ ret = register_pernet_device(&rxrpc_net_ops); + if (ret) + goto error_pernet; + +@@ -1035,7 +1035,7 @@ error_key_type: + error_sock: + proto_unregister(&rxrpc_proto); + error_proto: +- 
unregister_pernet_subsys(&rxrpc_net_ops); ++ unregister_pernet_device(&rxrpc_net_ops); + error_pernet: + rxrpc_exit_security(); + error_security: +@@ -1057,7 +1057,7 @@ static void __exit af_rxrpc_exit(void) + unregister_key_type(&key_type_rxrpc); + sock_unregister(PF_RXRPC); + proto_unregister(&rxrpc_proto); +- unregister_pernet_subsys(&rxrpc_net_ops); ++ unregister_pernet_device(&rxrpc_net_ops); + ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0); + ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0); + +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c +index 4404c491eb388..fa7b7ae2c2c5f 100644 +--- a/net/sunrpc/svcsock.c ++++ b/net/sunrpc/svcsock.c +@@ -1113,14 +1113,15 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg, + unsigned int offset, len, remaining; + struct bio_vec *bvec; + +- bvec = xdr->bvec; +- offset = xdr->page_base; ++ bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT); ++ offset = offset_in_page(xdr->page_base); + remaining = xdr->page_len; + flags = MSG_MORE | MSG_SENDPAGE_NOTLAST; + while (remaining > 0) { + if (remaining <= PAGE_SIZE && tail->iov_len == 0) + flags = 0; +- len = min(remaining, bvec->bv_len); ++ ++ len = min(remaining, bvec->bv_len - offset); + ret = kernel_sendpage(sock, bvec->bv_page, + bvec->bv_offset + offset, + len, flags); +diff --git a/scripts/Makefile b/scripts/Makefile +index b5418ec587fbd..9de3c03b94aa7 100644 +--- a/scripts/Makefile ++++ b/scripts/Makefile +@@ -3,6 +3,9 @@ + # scripts contains sources for various helper programs used throughout + # the kernel for the build process. + ++CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto) ++CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null) ++ + hostprogs-always-$(CONFIG_BUILD_BIN2C) += bin2c + hostprogs-always-$(CONFIG_KALLSYMS) += kallsyms + hostprogs-always-$(BUILD_C_RECORDMCOUNT) += recordmcount +@@ -14,8 +17,9 @@ hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert + + HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include + HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include +-HOSTLDLIBS_sign-file = -lcrypto +-HOSTLDLIBS_extract-cert = -lcrypto ++HOSTLDLIBS_sign-file = $(CRYPTO_LIBS) ++HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS) ++HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS) + + ifdef CONFIG_UNWINDER_ORC + ifeq ($(ARCH),x86_64) From 13badaf33496ce88a04ecf1d9047925c30ec6d11 Mon Sep 17 00:00:00 2001 From: Igor Pecovnik Date: Sat, 13 Feb 2021 22:30:59 +0100 Subject: [PATCH 6/7] sunxi - add upstream patch --- .../sunxi-current/patch-5.10.15-16.patch | 2316 +++++++++++++++++ 1 file changed, 2316 insertions(+) create mode 100644 patch/kernel/sunxi-current/patch-5.10.15-16.patch diff --git a/patch/kernel/sunxi-current/patch-5.10.15-16.patch b/patch/kernel/sunxi-current/patch-5.10.15-16.patch new file mode 100644 index 000000000..01d52a4be --- /dev/null +++ b/patch/kernel/sunxi-current/patch-5.10.15-16.patch @@ -0,0 +1,2316 @@ +diff --git a/Makefile b/Makefile +index b62d2d4ea7b02..9a1f26680d836 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 10 +-SUBLEVEL = 15 ++SUBLEVEL = 16 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c +index 8dad44262e751..495ffc9cf5e22 100644 +--- a/arch/powerpc/kernel/vdso.c ++++ b/arch/powerpc/kernel/vdso.c +@@ -475,7 +475,7 @@ static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32, + */ + + #ifdef CONFIG_PPC64 +- vdso64_rt_sigtramp = 
find_function64(v64, "__kernel_sigtramp_rt64"); ++ vdso64_rt_sigtramp = find_function64(v64, "__kernel_start_sigtramp_rt64"); + #endif + vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32"); + vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32"); +diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S +index bbf68cd01088b..2d4067561293e 100644 +--- a/arch/powerpc/kernel/vdso64/sigtramp.S ++++ b/arch/powerpc/kernel/vdso64/sigtramp.S +@@ -15,11 +15,20 @@ + + .text + ++/* ++ * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together ++ * are one function split in two parts. The kernel jumps to the former ++ * and the signal handler indirectly (by blr) returns to the latter. ++ * __kernel_sigtramp_rt64 needs to point to the return address so ++ * glibc can correctly identify the trampoline stack frame. ++ */ + .balign 8 + .balign IFETCH_ALIGN_BYTES +-V_FUNCTION_BEGIN(__kernel_sigtramp_rt64) ++V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64) + .Lsigrt_start: + bctrl /* call the handler */ ++V_FUNCTION_END(__kernel_start_sigtramp_rt64) ++V_FUNCTION_BEGIN(__kernel_sigtramp_rt64) + addi r1, r1, __SIGNAL_FRAMESIZE + li r0,__NR_rt_sigreturn + sc +diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S +index 256fb97202987..bd120f590b9ed 100644 +--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S ++++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S +@@ -150,6 +150,7 @@ VERSION + __kernel_get_tbfreq; + __kernel_sync_dicache; + __kernel_sync_dicache_p5; ++ __kernel_start_sigtramp_rt64; + __kernel_sigtramp_rt64; + __kernel_getcpu; + __kernel_time; +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index 54fbe1e80cc41..f13688c4b9317 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -1017,6 +1017,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css) + */ + void blkcg_destroy_blkgs(struct blkcg *blkcg) + { ++ might_sleep(); ++ + spin_lock_irq(&blkcg->lock); + + while (!hlist_empty(&blkcg->blkg_list)) { +@@ -1024,14 +1026,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg) + struct blkcg_gq, blkcg_node); + struct request_queue *q = blkg->q; + +- if (spin_trylock(&q->queue_lock)) { +- blkg_destroy(blkg); +- spin_unlock(&q->queue_lock); +- } else { ++ if (need_resched() || !spin_trylock(&q->queue_lock)) { ++ /* ++ * Given that the system can accumulate a huge number ++ * of blkgs in pathological cases, check to see if we ++ * need to rescheduling to avoid softlockup. 
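++ * Restarting is safe: the loop re-reads the first blkg_list entry
++ * under the re-acquired lock before continuing.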
++ */ + spin_unlock_irq(&blkcg->lock); +- cpu_relax(); ++ cond_resched(); + spin_lock_irq(&blkcg->lock); ++ continue; + } ++ ++ blkg_destroy(blkg); ++ spin_unlock(&q->queue_lock); + } + + spin_unlock_irq(&blkcg->lock); +diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c +index 689c06cbbb457..ade3ecf2ee495 100644 +--- a/drivers/gpio/gpiolib-cdev.c ++++ b/drivers/gpio/gpiolib-cdev.c +@@ -756,6 +756,8 @@ static void edge_detector_stop(struct line *line) + cancel_delayed_work_sync(&line->work); + WRITE_ONCE(line->sw_debounced, 0); + line->eflags = 0; ++ if (line->desc) ++ WRITE_ONCE(line->desc->debounce_period_us, 0); + /* do not change line->level - see comment in debounced_value() */ + } + +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 40dfb4d0ffbec..db62e6a934d91 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -2597,6 +2597,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + u32 n_entries, val; + int ln, rate = 0; + ++ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) ++ return; ++ + if (type != INTEL_OUTPUT_HDMI) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + +@@ -2605,12 +2608,11 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + + ddi_translations = icl_get_mg_buf_trans(encoder, type, rate, + &n_entries); +- /* The table does not have values for level 3 and level 9. */ +- if (level >= n_entries || level == 3 || level == 9) { ++ if (level >= n_entries) { + drm_dbg_kms(&dev_priv->drm, + "DDI translation not found for level %d. Using %d instead.", +- level, n_entries - 2); +- level = n_entries - 2; ++ level, n_entries - 1); ++ level = n_entries - 1; + } + + /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */ +@@ -2742,6 +2744,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, + u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; + int rate = 0; + ++ if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) ++ return; ++ + if (type != INTEL_OUTPUT_HDMI) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + +diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h +index 168d7694ede5c..6d3a8a3d2087b 100644 +--- a/drivers/gpu/drm/nouveau/include/nvif/push.h ++++ b/drivers/gpu/drm/nouveau/include/nvif/push.h +@@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push) + } while(0) + #endif + +-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \ +- PUSH_##o##_HDR((p), s, mA, (c)+(n)); \ +- PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ ++#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \ ++ PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \ ++ PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ + } while(0) +-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ +- PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ +- PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ ++ PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) 
do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ +- PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ ++ PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ +- PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ ++ PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ +- PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ ++ PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ +- PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ ++ PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ +- PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ ++ PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ +- PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ ++ PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ +- PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ ++ PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) +-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) 
do { \ +- PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ +- PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ +- PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ ++#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ ++ PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ ++ PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ ++ PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ + } while(0) + +-#define PUSH_1D(X,o,p,s,mA,dA) \ +- PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA)) +-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ +- PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \ +- X##mA, (dA)) +-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ +- PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) +-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ +- PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \ +- X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) +-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ +- PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \ +- X##mD, (dD), \ +- X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) ++#define PUSH_1D(X,o,p,s,mA,dA) \ ++ PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA)) ++#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ ++ PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \ ++ X##mA, (dA)) ++#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ ++ PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) ++#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ ++ PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \ ++ X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) ++#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ ++ PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \ ++ X##mD, (dD), \ ++ X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) + #define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \ +- PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \ +- X##mE, (dE), \ +- X##mD, (dD), \ +- X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) ++ PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \ ++ X##mE, (dE), \ ++ X##mD, (dD), \ ++ X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) + #define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \ +- PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \ +- X##mF, (dF), \ +- X##mE, (dE), \ +- X##mD, (dD), \ +- X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) ++ PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \ ++ X##mF, (dF), \ ++ X##mE, (dE), \ ++ X##mD, (dD), \ ++ X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) + #define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \ +- PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \ +- X##mG, (dG), \ +- X##mF, (dF), \ +- X##mE, (dE), \ +- X##mD, (dD), \ +- X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) ++ PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \ ++ X##mG, (dG), \ ++ X##mF, (dF), \ ++ X##mE, (dE), \ ++ X##mD, (dD), \ ++ X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) + #define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \ +- PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \ +- X##mH, (dH), \ +- X##mG, (dG), \ +- X##mF, (dF), \ +- X##mE, (dE), \ +- X##mD, (dD), \ +- X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) ++ PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \ ++ X##mH, (dH), \ ++ X##mG, (dG), \ ++ X##mF, (dF), \ ++ X##mE, (dE), \ ++ X##mD, (dD), \ ++ X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) + #define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \ +- PUSH_10(X, DATA_, 1, 1, 0, o, 
(p), s, X##mJ, (dJ), \ +- X##mI, (dI), \ +- X##mH, (dH), \ +- X##mG, (dG), \ +- X##mF, (dF), \ +- X##mE, (dE), \ +- X##mD, (dD), \ +- X##mC, (dC), \ +- X##mB, (dB), \ +- X##mA, (dA)) ++ PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \ ++ X##mI, (dI), \ ++ X##mH, (dH), \ ++ X##mG, (dG), \ ++ X##mF, (dF), \ ++ X##mE, (dE), \ ++ X##mD, (dD), \ ++ X##mC, (dC), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) + +-#define PUSH_1P(X,o,p,s,mA,dp,ds) \ +- PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp)) +-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ +- PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \ +- X##mA, (dA)) +-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ +- PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \ +- X##mB, (dB), \ +- X##mA, (dA)) ++#define PUSH_1P(X,o,p,s,mA,dp,ds) \ ++ PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp)) ++#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ ++ PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \ ++ X##mA, (dA)) ++#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ ++ PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \ ++ X##mB, (dB), \ ++ X##mA, (dA)) + + #define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL + #define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \ +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c +index 0818d3e507347..2ffd2f354d0ae 100644 +--- a/drivers/i2c/busses/i2c-mt65xx.c ++++ b/drivers/i2c/busses/i2c-mt65xx.c +@@ -1275,7 +1275,8 @@ static int mtk_i2c_probe(struct platform_device *pdev) + mtk_i2c_clock_disable(i2c); + + ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq, +- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c); ++ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, ++ I2C_DRV_NAME, i2c); + if (ret < 0) { + dev_err(&pdev->dev, + "Request I2C IRQ %d fail\n", irq); +@@ -1302,7 +1303,16 @@ static int mtk_i2c_remove(struct platform_device *pdev) + } + + #ifdef CONFIG_PM_SLEEP +-static int mtk_i2c_resume(struct device *dev) ++static int mtk_i2c_suspend_noirq(struct device *dev) ++{ ++ struct mtk_i2c *i2c = dev_get_drvdata(dev); ++ ++ i2c_mark_adapter_suspended(&i2c->adap); ++ ++ return 0; ++} ++ ++static int mtk_i2c_resume_noirq(struct device *dev) + { + int ret; + struct mtk_i2c *i2c = dev_get_drvdata(dev); +@@ -1317,12 +1327,15 @@ static int mtk_i2c_resume(struct device *dev) + + mtk_i2c_clock_disable(i2c); + ++ i2c_mark_adapter_resumed(&i2c->adap); ++ + return 0; + } + #endif + + static const struct dev_pm_ops mtk_i2c_pm = { +- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume) ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq, ++ mtk_i2c_resume_noirq) + }; + + static struct platform_driver mtk_i2c_driver = { +diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +index 5beec901713fb..a262c949ed76b 100644 +--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c ++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +@@ -1158,11 +1158,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + #endif + } + if (!n || !n->dev) +- goto free_sk; ++ goto free_dst; + + ndev = n->dev; +- if (!ndev) +- goto free_dst; + if (is_vlan_dev(ndev)) + ndev = vlan_dev_real_dev(ndev); + +@@ -1249,7 +1247,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + free_csk: + chtls_sock_release(&csk->kref); + free_dst: +- neigh_release(n); ++ if (n) ++ neigh_release(n); + dst_release(dst); + free_sk: + inet_csk_prepare_forced_close(newsk); +diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c 
b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +index d2bbe6a735142..92c50efd48fc3 100644 +--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c ++++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +@@ -358,6 +358,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { + const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; + const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; + const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; ++const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; + const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz"; + const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz"; + const char iwl_ma_name[] = "Intel(R) Wi-Fi 6"; +@@ -384,6 +385,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = { + .num_rbds = IWL_NUM_RBDS_22000_HE, + }; + ++const struct iwl_cfg iwl_qu_b0_hr_b0 = { ++ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, ++ IWL_DEVICE_22500, ++ /* ++ * This device doesn't support receiving BlockAck with a large bitmap ++ * so we need to restrict the size of transmitted aggregation to the ++ * HT size; mac80211 would otherwise pick the HE max (256) by default. ++ */ ++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, ++ .num_rbds = IWL_NUM_RBDS_22000_HE, ++}; ++ + const struct iwl_cfg iwl_ax201_cfg_qu_hr = { + .name = "Intel(R) Wi-Fi 6 AX201 160MHz", + .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, +@@ -410,6 +423,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = { + .num_rbds = IWL_NUM_RBDS_22000_HE, + }; + ++const struct iwl_cfg iwl_qu_c0_hr_b0 = { ++ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, ++ IWL_DEVICE_22500, ++ /* ++ * This device doesn't support receiving BlockAck with a large bitmap ++ * so we need to restrict the size of transmitted aggregation to the ++ * HT size; mac80211 would otherwise pick the HE max (256) by default. 
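++ * (The HT limit, IEEE80211_MAX_AMPDU_BUF_HT, is 64 frames.)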
++ */ ++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, ++ .num_rbds = IWL_NUM_RBDS_22000_HE, ++}; ++ + const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = { + .name = "Intel(R) Wi-Fi 6 AX201 160MHz", + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h +index e82e3fc963be2..9b91aa9b2e7f1 100644 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h +@@ -544,6 +544,7 @@ extern const char iwl9260_killer_1550_name[]; + extern const char iwl9560_killer_1550i_name[]; + extern const char iwl9560_killer_1550s_name[]; + extern const char iwl_ax200_name[]; ++extern const char iwl_ax203_name[]; + extern const char iwl_ax201_name[]; + extern const char iwl_ax101_name[]; + extern const char iwl_ax200_killer_1650w_name[]; +@@ -627,6 +628,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc; + extern const struct iwl_cfg iwl_qu_b0_hr1_b0; + extern const struct iwl_cfg iwl_qu_c0_hr1_b0; + extern const struct iwl_cfg iwl_quz_a0_hr1_b0; ++extern const struct iwl_cfg iwl_qu_b0_hr_b0; ++extern const struct iwl_cfg iwl_qu_c0_hr_b0; + extern const struct iwl_cfg iwl_ax200_cfg_cc; + extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; + extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +index f043eefabb4ec..7b1d2dac6ceb8 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +@@ -514,7 +514,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, + const size_t bufsz = sizeof(buf); + int pos = 0; + ++ mutex_lock(&mvm->mutex); + iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); ++ mutex_unlock(&mvm->mutex); ++ + do_div(curr_os, NSEC_PER_USEC); + diff = curr_os - curr_gp2; + pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index b627e7da7ac9d..d42165559df6e 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -4249,6 +4249,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, + iwl_mvm_binding_remove_vif(mvm, vif); + + out: ++ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && ++ switching_chanctx) ++ return; + mvmvif->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); + } +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index 0d1118f66f0d5..cb83490f1016f 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -845,6 +845,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, + if (!mvm->scan_cmd) + goto out_free; + ++ /* invalidate ids to prevent accidental removal of sta_id 0 */ ++ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; ++ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; ++ + /* Set EBS as successful as long as not stated otherwise by the FW. 
*/ + mvm->last_ebs_successful = true; + +@@ -1245,6 +1249,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) + reprobe = container_of(wk, struct iwl_mvm_reprobe, work); + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); ++ put_device(reprobe->dev); + kfree(reprobe); + module_put(THIS_MODULE); + } +@@ -1295,7 +1300,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) + module_put(THIS_MODULE); + return; + } +- reprobe->dev = mvm->trans->dev; ++ reprobe->dev = get_device(mvm->trans->dev); + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); + schedule_work(&reprobe->work); + } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +index 799d8219463cb..a66a5c19474a9 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +@@ -2103,6 +2103,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + + lockdep_assert_held(&mvm->mutex); + ++ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) ++ return -EINVAL; ++ + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); + if (ret) +@@ -2117,6 +2120,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) + + lockdep_assert_held(&mvm->mutex); + ++ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) ++ return -EINVAL; ++ + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); + if (ret) +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +index d719e433a59bf..2d43899fbdd7a 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +@@ -245,8 +245,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + /* Allocate IML */ + iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, + &trans_pcie->iml_dma_addr, GFP_KERNEL); +- if (!iml_img) +- return -ENOMEM; ++ if (!iml_img) { ++ ret = -ENOMEM; ++ goto err_free_ctxt_info; ++ } + + memcpy(iml_img, trans->iml, trans->iml_len); + +@@ -284,6 +286,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + + return 0; + ++err_free_ctxt_info: ++ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), ++ trans_pcie->ctxt_info_gen3, ++ trans_pcie->ctxt_info_dma_addr); ++ trans_pcie->ctxt_info_gen3 = NULL; + err_free_prph_info: + dma_free_coherent(trans->dev, + sizeof(*prph_info), +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +index 7b5ece380fbfb..2823a1e81656d 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +@@ -966,6 +966,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_qu_b0_hr1_b0, iwl_ax101_name), ++ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, ++ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, ++ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, ++ IWL_CFG_ANY, IWL_CFG_ANY, ++ iwl_qu_b0_hr_b0, iwl_ax203_name), + + /* Qu C step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, +@@ -973,6 +978,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_qu_c0_hr1_b0, iwl_ax101_name), ++ 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, ++ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, ++ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, ++ IWL_CFG_ANY, IWL_CFG_ANY, ++ iwl_qu_c0_hr_b0, iwl_ax203_name), + + /* QuZ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +index 966be5689d63a..ed54d04e43964 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +@@ -299,6 +299,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + ++ if (!txq) { ++ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); ++ return; ++ } ++ + spin_lock_bh(&txq->lock); + while (txq->write_ptr != txq->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", +diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c +index af0b27a68d84d..9181221a2434d 100644 +--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c +@@ -887,10 +887,8 @@ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) + int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb = txq->entries[idx].skb; + +- if (WARN_ON_ONCE(!skb)) +- continue; +- +- iwl_txq_free_tso_page(trans, skb); ++ if (!WARN_ON_ONCE(!skb)) ++ iwl_txq_free_tso_page(trans, skb); + } + iwl_txq_gen2_free_tfd(trans, txq); + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 42bbd99a36acf..35098dbd32a3c 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -1813,13 +1813,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + { + struct regulator_dev *r; + struct device *dev = rdev->dev.parent; +- int ret; ++ int ret = 0; + + /* No supply to resolve? */ + if (!rdev->supply_name) + return 0; + +- /* Supply already resolved? */ ++ /* Supply already resolved? (fast-path without locking contention) */ + if (rdev->supply) + return 0; + +@@ -1829,7 +1829,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + + /* Did the lookup explicitly defer for us? 
*/ + if (ret == -EPROBE_DEFER) +- return ret; ++ goto out; + + if (have_full_constraints()) { + r = dummy_regulator_rdev; +@@ -1837,15 +1837,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + } else { + dev_err(dev, "Failed to resolve %s-supply for %s\n", + rdev->supply_name, rdev->desc->name); +- return -EPROBE_DEFER; ++ ret = -EPROBE_DEFER; ++ goto out; + } + } + + if (r == rdev) { + dev_err(dev, "Supply for %s (%s) resolved to itself\n", + rdev->desc->name, rdev->supply_name); +- if (!have_full_constraints()) +- return -EINVAL; ++ if (!have_full_constraints()) { ++ ret = -EINVAL; ++ goto out; ++ } + r = dummy_regulator_rdev; + get_device(&r->dev); + } +@@ -1859,7 +1862,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + if (r->dev.parent && r->dev.parent != rdev->dev.parent) { + if (!device_is_bound(r->dev.parent)) { + put_device(&r->dev); +- return -EPROBE_DEFER; ++ ret = -EPROBE_DEFER; ++ goto out; + } + } + +@@ -1867,15 +1871,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + ret = regulator_resolve_supply(r); + if (ret < 0) { + put_device(&r->dev); +- return ret; ++ goto out; ++ } ++ ++ /* ++ * Recheck rdev->supply with rdev->mutex lock held to avoid a race ++ * between rdev->supply null check and setting rdev->supply in ++ * set_supply() from concurrent tasks. ++ */ ++ regulator_lock(rdev); ++ ++ /* Supply just resolved by a concurrent task? */ ++ if (rdev->supply) { ++ regulator_unlock(rdev); ++ put_device(&r->dev); ++ goto out; + } + + ret = set_supply(rdev, r); + if (ret < 0) { ++ regulator_unlock(rdev); + put_device(&r->dev); +- return ret; ++ goto out; + } + ++ regulator_unlock(rdev); ++ + /* + * In set_machine_constraints() we may have turned this regulator on + * but we couldn't propagate to the supply if it hadn't been resolved +@@ -1886,11 +1907,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + if (ret < 0) { + _regulator_put(rdev->supply); + rdev->supply = NULL; +- return ret; ++ goto out; + } + } + +- return 0; ++out: ++ return ret; + } + + /* Internal regulator request function */ +diff --git a/fs/io-wq.c b/fs/io-wq.c +index b53c055bea6a3..f72d53848dcbc 100644 +--- a/fs/io-wq.c ++++ b/fs/io-wq.c +@@ -1078,16 +1078,6 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, + return IO_WQ_CANCEL_NOTFOUND; + } + +-static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data) +-{ +- return work == data; +-} +- +-enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork) +-{ +- return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false); +-} +- + struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) + { + int ret = -ENOMEM, node; +diff --git a/fs/io-wq.h b/fs/io-wq.h +index aaa363f358916..75113bcd5889f 100644 +--- a/fs/io-wq.h ++++ b/fs/io-wq.h +@@ -130,7 +130,6 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work) + } + + void io_wq_cancel_all(struct io_wq *wq); +-enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork); + + typedef bool (work_cancel_fn)(struct io_wq_work *, void *); + +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 3b6307f6bd93d..d0b7332ca7033 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -286,7 +286,6 @@ struct io_ring_ctx { + struct list_head timeout_list; + struct list_head cq_overflow_list; + +- wait_queue_head_t inflight_wait; + struct io_uring_sqe *sq_sqes; + } ____cacheline_aligned_in_smp; + +@@ -997,6 +996,43 @@ static inline void 
io_clean_op(struct io_kiocb *req) + __io_clean_op(req); + } + ++static inline bool __io_match_files(struct io_kiocb *req, ++ struct files_struct *files) ++{ ++ if (req->file && req->file->f_op == &io_uring_fops) ++ return true; ++ ++ return ((req->flags & REQ_F_WORK_INITIALIZED) && ++ (req->work.flags & IO_WQ_WORK_FILES)) && ++ req->work.identity->files == files; ++} ++ ++static bool io_match_task(struct io_kiocb *head, ++ struct task_struct *task, ++ struct files_struct *files) ++{ ++ struct io_kiocb *link; ++ ++ if (task && head->task != task) { ++ /* in terms of cancelation, always match if req task is dead */ ++ if (head->task->flags & PF_EXITING) ++ return true; ++ return false; ++ } ++ if (!files) ++ return true; ++ if (__io_match_files(head, files)) ++ return true; ++ if (head->flags & REQ_F_LINK_HEAD) { ++ list_for_each_entry(link, &head->link_list, link_list) { ++ if (__io_match_files(link, files)) ++ return true; ++ } ++ } ++ return false; ++} ++ ++ + static void io_sq_thread_drop_mm(void) + { + struct mm_struct *mm = current->mm; +@@ -1183,7 +1219,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) + INIT_LIST_HEAD(&ctx->iopoll_list); + INIT_LIST_HEAD(&ctx->defer_list); + INIT_LIST_HEAD(&ctx->timeout_list); +- init_waitqueue_head(&ctx->inflight_wait); + spin_lock_init(&ctx->inflight_lock); + INIT_LIST_HEAD(&ctx->inflight_list); + INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work); +@@ -1368,11 +1403,14 @@ static bool io_grab_identity(struct io_kiocb *req) + return false; + atomic_inc(&id->files->count); + get_nsproxy(id->nsproxy); +- req->flags |= REQ_F_INFLIGHT; + +- spin_lock_irq(&ctx->inflight_lock); +- list_add(&req->inflight_entry, &ctx->inflight_list); +- spin_unlock_irq(&ctx->inflight_lock); ++ if (!(req->flags & REQ_F_INFLIGHT)) { ++ req->flags |= REQ_F_INFLIGHT; ++ ++ spin_lock_irq(&ctx->inflight_lock); ++ list_add(&req->inflight_entry, &ctx->inflight_list); ++ spin_unlock_irq(&ctx->inflight_lock); ++ } + req->work.flags |= IO_WQ_WORK_FILES; + } + if (!(req->work.flags & IO_WQ_WORK_MM) && +@@ -1466,30 +1504,18 @@ static void io_kill_timeout(struct io_kiocb *req) + } + } + +-static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk) +-{ +- struct io_ring_ctx *ctx = req->ctx; +- +- if (!tsk || req->task == tsk) +- return true; +- if (ctx->flags & IORING_SETUP_SQPOLL) { +- if (ctx->sq_data && req->task == ctx->sq_data->thread) +- return true; +- } +- return false; +-} +- + /* + * Returns true if we found and killed one or more timeouts + */ +-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk) ++static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, ++ struct files_struct *files) + { + struct io_kiocb *req, *tmp; + int canceled = 0; + + spin_lock_irq(&ctx->completion_lock); + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { +- if (io_task_match(req, tsk)) { ++ if (io_match_task(req, tsk, files)) { + io_kill_timeout(req); + canceled++; + } +@@ -1616,32 +1642,6 @@ static void io_cqring_mark_overflow(struct io_ring_ctx *ctx) + } + } + +-static inline bool __io_match_files(struct io_kiocb *req, +- struct files_struct *files) +-{ +- return ((req->flags & REQ_F_WORK_INITIALIZED) && +- (req->work.flags & IO_WQ_WORK_FILES)) && +- req->work.identity->files == files; +-} +- +-static bool io_match_files(struct io_kiocb *req, +- struct files_struct *files) +-{ +- struct io_kiocb *link; +- +- if (!files) +- return true; +- if (__io_match_files(req, files)) +- return 
true; +- if (req->flags & REQ_F_LINK_HEAD) { +- list_for_each_entry(link, &req->link_list, link_list) { +- if (__io_match_files(link, files)) +- return true; +- } +- } +- return false; +-} +- + /* Returns true if there are no backlogged entries after the flush */ + static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, + struct task_struct *tsk, +@@ -1663,9 +1663,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, + + cqe = NULL; + list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) { +- if (tsk && req->task != tsk) +- continue; +- if (!io_match_files(req, files)) ++ if (!io_match_task(req, tsk, files)) + continue; + + cqe = io_get_cqring(ctx); +@@ -2086,6 +2084,9 @@ static void __io_req_task_submit(struct io_kiocb *req) + else + __io_req_task_cancel(req, -EFAULT); + mutex_unlock(&ctx->uring_lock); ++ ++ if (ctx->flags & IORING_SETUP_SQPOLL) ++ io_sq_thread_drop_mm(); + } + + static void io_req_task_submit(struct callback_head *cb) +@@ -5314,7 +5315,8 @@ static bool io_poll_remove_one(struct io_kiocb *req) + /* + * Returns true if we found and killed one or more poll requests + */ +-static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk) ++static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, ++ struct files_struct *files) + { + struct hlist_node *tmp; + struct io_kiocb *req; +@@ -5326,7 +5328,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk) + + list = &ctx->cancel_hash[i]; + hlist_for_each_entry_safe(req, tmp, list, hash_node) { +- if (io_task_match(req, tsk)) ++ if (io_match_task(req, tsk, files)) + posted += io_poll_remove_one(req); + } + } +@@ -5893,17 +5895,20 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) + static void io_req_drop_files(struct io_kiocb *req) + { + struct io_ring_ctx *ctx = req->ctx; ++ struct io_uring_task *tctx = req->task->io_uring; + unsigned long flags; + +- put_files_struct(req->work.identity->files); +- put_nsproxy(req->work.identity->nsproxy); ++ if (req->work.flags & IO_WQ_WORK_FILES) { ++ put_files_struct(req->work.identity->files); ++ put_nsproxy(req->work.identity->nsproxy); ++ } + spin_lock_irqsave(&ctx->inflight_lock, flags); + list_del(&req->inflight_entry); + spin_unlock_irqrestore(&ctx->inflight_lock, flags); + req->flags &= ~REQ_F_INFLIGHT; + req->work.flags &= ~IO_WQ_WORK_FILES; +- if (waitqueue_active(&ctx->inflight_wait)) +- wake_up(&ctx->inflight_wait); ++ if (atomic_read(&tctx->in_idle)) ++ wake_up(&tctx->wait); + } + + static void __io_clean_op(struct io_kiocb *req) +@@ -6168,6 +6173,16 @@ static struct file *io_file_get(struct io_submit_state *state, + file = __io_file_get(state, fd); + } + ++ if (file && file->f_op == &io_uring_fops && ++ !(req->flags & REQ_F_INFLIGHT)) { ++ io_req_init_async(req); ++ req->flags |= REQ_F_INFLIGHT; ++ ++ spin_lock_irq(&ctx->inflight_lock); ++ list_add(&req->inflight_entry, &ctx->inflight_list); ++ spin_unlock_irq(&ctx->inflight_lock); ++ } ++ + return file; + } + +@@ -6989,14 +7004,18 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + TASK_INTERRUPTIBLE); + /* make sure we run task_work before checking for signals */ + ret = io_run_task_work_sig(); +- if (ret > 0) ++ if (ret > 0) { ++ finish_wait(&ctx->wait, &iowq.wq); + continue; ++ } + else if (ret < 0) + break; + if (io_should_wake(&iowq)) + break; +- if (test_bit(0, &ctx->cq_check_overflow)) ++ if (test_bit(0, &ctx->cq_check_overflow)) { ++ 
finish_wait(&ctx->wait, &iowq.wq); + continue; ++ } + schedule(); + } while (1); + finish_wait(&ctx->wait, &iowq.wq); +@@ -8487,8 +8506,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) + __io_cqring_overflow_flush(ctx, true, NULL, NULL); + mutex_unlock(&ctx->uring_lock); + +- io_kill_timeouts(ctx, NULL); +- io_poll_remove_all(ctx, NULL); ++ io_kill_timeouts(ctx, NULL, NULL); ++ io_poll_remove_all(ctx, NULL, NULL); + + if (ctx->io_wq) + io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true); +@@ -8524,112 +8543,31 @@ static int io_uring_release(struct inode *inode, struct file *file) + return 0; + } + +-/* +- * Returns true if 'preq' is the link parent of 'req' +- */ +-static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req) +-{ +- struct io_kiocb *link; +- +- if (!(preq->flags & REQ_F_LINK_HEAD)) +- return false; +- +- list_for_each_entry(link, &preq->link_list, link_list) { +- if (link == req) +- return true; +- } +- +- return false; +-} +- +-/* +- * We're looking to cancel 'req' because it's holding on to our files, but +- * 'req' could be a link to another request. See if it is, and cancel that +- * parent request if so. +- */ +-static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req) +-{ +- struct hlist_node *tmp; +- struct io_kiocb *preq; +- bool found = false; +- int i; +- +- spin_lock_irq(&ctx->completion_lock); +- for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { +- struct hlist_head *list; +- +- list = &ctx->cancel_hash[i]; +- hlist_for_each_entry_safe(preq, tmp, list, hash_node) { +- found = io_match_link(preq, req); +- if (found) { +- io_poll_remove_one(preq); +- break; +- } +- } +- } +- spin_unlock_irq(&ctx->completion_lock); +- return found; +-} +- +-static bool io_timeout_remove_link(struct io_ring_ctx *ctx, +- struct io_kiocb *req) +-{ +- struct io_kiocb *preq; +- bool found = false; +- +- spin_lock_irq(&ctx->completion_lock); +- list_for_each_entry(preq, &ctx->timeout_list, timeout.list) { +- found = io_match_link(preq, req); +- if (found) { +- __io_timeout_cancel(preq); +- break; +- } +- } +- spin_unlock_irq(&ctx->completion_lock); +- return found; +-} ++struct io_task_cancel { ++ struct task_struct *task; ++ struct files_struct *files; ++}; + +-static bool io_cancel_link_cb(struct io_wq_work *work, void *data) ++static bool io_cancel_task_cb(struct io_wq_work *work, void *data) + { + struct io_kiocb *req = container_of(work, struct io_kiocb, work); ++ struct io_task_cancel *cancel = data; + bool ret; + +- if (req->flags & REQ_F_LINK_TIMEOUT) { ++ if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) { + unsigned long flags; + struct io_ring_ctx *ctx = req->ctx; + + /* protect against races with linked timeouts */ + spin_lock_irqsave(&ctx->completion_lock, flags); +- ret = io_match_link(req, data); ++ ret = io_match_task(req, cancel->task, cancel->files); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else { +- ret = io_match_link(req, data); ++ ret = io_match_task(req, cancel->task, cancel->files); + } + return ret; + } + +-static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) +-{ +- enum io_wq_cancel cret; +- +- /* cancel this particular work, if it's running */ +- cret = io_wq_cancel_work(ctx->io_wq, &req->work); +- if (cret != IO_WQ_CANCEL_NOTFOUND) +- return; +- +- /* find links that hold this pending, cancel those */ +- cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true); +- if (cret != IO_WQ_CANCEL_NOTFOUND) +- return; +- +- /* if we have a poll link 
holding this pending, cancel that */ +- if (io_poll_remove_link(ctx, req)) +- return; +- +- /* final option, timeout link is holding this req pending */ +- io_timeout_remove_link(ctx, req); +-} +- + static void io_cancel_defer_files(struct io_ring_ctx *ctx, + struct task_struct *task, + struct files_struct *files) +@@ -8639,8 +8577,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx, + + spin_lock_irq(&ctx->completion_lock); + list_for_each_entry_reverse(de, &ctx->defer_list, list) { +- if (io_task_match(de->req, task) && +- io_match_files(de->req, files)) { ++ if (io_match_task(de->req, task, files)) { + list_cut_position(&list, &ctx->defer_list, &de->list); + break; + } +@@ -8657,73 +8594,56 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx, + } + } + +-/* +- * Returns true if we found and killed one or more files pinning requests +- */ +-static bool io_uring_cancel_files(struct io_ring_ctx *ctx, ++static int io_uring_count_inflight(struct io_ring_ctx *ctx, ++ struct task_struct *task, ++ struct files_struct *files) ++{ ++ struct io_kiocb *req; ++ int cnt = 0; ++ ++ spin_lock_irq(&ctx->inflight_lock); ++ list_for_each_entry(req, &ctx->inflight_list, inflight_entry) ++ cnt += io_match_task(req, task, files); ++ spin_unlock_irq(&ctx->inflight_lock); ++ return cnt; ++} ++ ++static void io_uring_cancel_files(struct io_ring_ctx *ctx, + struct task_struct *task, + struct files_struct *files) + { +- if (list_empty_careful(&ctx->inflight_list)) +- return false; +- + while (!list_empty_careful(&ctx->inflight_list)) { +- struct io_kiocb *cancel_req = NULL, *req; ++ struct io_task_cancel cancel = { .task = task, .files = files }; + DEFINE_WAIT(wait); ++ int inflight; + +- spin_lock_irq(&ctx->inflight_lock); +- list_for_each_entry(req, &ctx->inflight_list, inflight_entry) { +- if (req->task == task && +- (req->work.flags & IO_WQ_WORK_FILES) && +- req->work.identity->files != files) +- continue; +- /* req is being completed, ignore */ +- if (!refcount_inc_not_zero(&req->refs)) +- continue; +- cancel_req = req; +- break; +- } +- if (cancel_req) +- prepare_to_wait(&ctx->inflight_wait, &wait, +- TASK_UNINTERRUPTIBLE); +- spin_unlock_irq(&ctx->inflight_lock); +- +- /* We need to keep going until we don't find a matching req */ +- if (!cancel_req) ++ inflight = io_uring_count_inflight(ctx, task, files); ++ if (!inflight) + break; +- /* cancel this request, or head link requests */ +- io_attempt_cancel(ctx, cancel_req); +- io_cqring_overflow_flush(ctx, true, task, files); + +- io_put_req(cancel_req); ++ io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true); ++ io_poll_remove_all(ctx, task, files); ++ io_kill_timeouts(ctx, task, files); + /* cancellations _may_ trigger task work */ + io_run_task_work(); +- schedule(); +- finish_wait(&ctx->inflight_wait, &wait); +- } +- +- return true; +-} + +-static bool io_cancel_task_cb(struct io_wq_work *work, void *data) +-{ +- struct io_kiocb *req = container_of(work, struct io_kiocb, work); +- struct task_struct *task = data; +- +- return io_task_match(req, task); ++ prepare_to_wait(&task->io_uring->wait, &wait, ++ TASK_UNINTERRUPTIBLE); ++ if (inflight == io_uring_count_inflight(ctx, task, files)) ++ schedule(); ++ finish_wait(&task->io_uring->wait, &wait); ++ } + } + +-static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx, +- struct task_struct *task, +- struct files_struct *files) ++static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx, ++ struct task_struct *task) + { +- bool ret; +- +- ret = 
io_uring_cancel_files(ctx, task, files); +- if (!files) { ++ while (1) { ++ struct io_task_cancel cancel = { .task = task, .files = NULL, }; + enum io_wq_cancel cret; ++ bool ret = false; + +- cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true); ++ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true); + if (cret != IO_WQ_CANCEL_NOTFOUND) + ret = true; + +@@ -8735,11 +8655,13 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx, + } + } + +- ret |= io_poll_remove_all(ctx, task); +- ret |= io_kill_timeouts(ctx, task); ++ ret |= io_poll_remove_all(ctx, task, NULL); ++ ret |= io_kill_timeouts(ctx, task, NULL); ++ if (!ret) ++ break; ++ io_run_task_work(); ++ cond_resched(); + } +- +- return ret; + } + + static void io_disable_sqo_submit(struct io_ring_ctx *ctx) +@@ -8764,8 +8686,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, + struct task_struct *task = current; + + if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { +- /* for SQPOLL only sqo_task has task notes */ +- WARN_ON_ONCE(ctx->sqo_task != current); + io_disable_sqo_submit(ctx); + task = ctx->sq_data->thread; + atomic_inc(&task->io_uring->in_idle); +@@ -8775,10 +8695,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, + io_cancel_defer_files(ctx, task, files); + io_cqring_overflow_flush(ctx, true, task, files); + +- while (__io_uring_cancel_task_requests(ctx, task, files)) { +- io_run_task_work(); +- cond_resched(); +- } ++ io_uring_cancel_files(ctx, task, files); ++ if (!files) ++ __io_uring_cancel_task_requests(ctx, task); + + if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { + atomic_dec(&task->io_uring->in_idle); +@@ -8919,15 +8838,15 @@ void __io_uring_task_cancel(void) + prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE); + + /* +- * If we've seen completions, retry. This avoids a race where +- * a completion comes in before we did prepare_to_wait(). ++ * If we've seen completions, retry without waiting. This ++ * avoids a race where a completion comes in before we did ++ * prepare_to_wait(). 
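++ * Note that finish_wait() now runs on every iteration, so each
++ * prepare_to_wait() above stays balanced even when we retry.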
+ */ +- if (inflight != tctx_inflight(tctx)) +- continue; +- schedule(); ++ if (inflight == tctx_inflight(tctx)) ++ schedule(); ++ finish_wait(&tctx->wait, &wait); + } while (1); + +- finish_wait(&tctx->wait, &wait); + atomic_dec(&tctx->in_idle); + + io_uring_remove_task_files(tctx); +@@ -8938,6 +8857,9 @@ static int io_uring_flush(struct file *file, void *data) + struct io_uring_task *tctx = current->io_uring; + struct io_ring_ctx *ctx = file->private_data; + ++ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) ++ io_uring_cancel_task_requests(ctx, NULL); ++ + if (!tctx) + return 0; + +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index cbadcf6ca4da2..b8712b835b105 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -1000,7 +1000,7 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo, + { + u32 seqid = be32_to_cpu(stateid->seqid); + +- return !pnfs_seqid_is_newer(seqid, lo->plh_barrier); ++ return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier; + } + + /* lget is set to 1 if called from inside send_layoutget call chain */ +@@ -1913,6 +1913,11 @@ static void nfs_layoutget_end(struct pnfs_layout_hdr *lo) + wake_up_var(&lo->plh_outstanding); + } + ++static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo) ++{ ++ return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags); ++} ++ + static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo) + { + unsigned long *bitlock = &lo->plh_flags; +@@ -2387,23 +2392,34 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) + goto out_forget; + } + +- if (!pnfs_layout_is_valid(lo)) { +- /* We have a completely new layout */ +- pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true); +- } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { ++ if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { + /* existing state ID, make sure the sequence number matches. */ + if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { ++ if (!pnfs_layout_is_valid(lo) && ++ pnfs_is_first_layoutget(lo)) ++ lo->plh_barrier = 0; + dprintk("%s forget reply due to sequence\n", __func__); + goto out_forget; + } + pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false); +- } else { ++ } else if (pnfs_layout_is_valid(lo)) { + /* + * We got an entirely new state ID. 
Mark all segments for the + * inode invalid, and retry the layoutget + */ +- pnfs_mark_layout_stateid_invalid(lo, &free_me); ++ struct pnfs_layout_range range = { ++ .iomode = IOMODE_ANY, ++ .length = NFS4_MAX_UINT64, ++ }; ++ pnfs_set_plh_return_info(lo, IOMODE_ANY, 0); ++ pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, ++ &range, 0); + goto out_forget; ++ } else { ++ /* We have a completely new layout */ ++ if (!pnfs_is_first_layoutget(lo)) ++ goto out_forget; ++ pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true); + } + + pnfs_get_lseg(lseg); +diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c +index 64bc81363c6cc..e1bd592ce7001 100644 +--- a/fs/nilfs2/file.c ++++ b/fs/nilfs2/file.c +@@ -141,6 +141,7 @@ const struct file_operations nilfs_file_operations = { + /* .release = nilfs_release_file, */ + .fsync = nilfs_sync_file, + .splice_read = generic_file_splice_read, ++ .splice_write = iter_file_splice_write, + }; + + const struct inode_operations nilfs_file_inode_operations = { +diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c +index 8a19773b5a0b7..45f44425d8560 100644 +--- a/fs/squashfs/block.c ++++ b/fs/squashfs/block.c +@@ -196,9 +196,15 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, + length = SQUASHFS_COMPRESSED_SIZE(length); + index += 2; + +- TRACE("Block @ 0x%llx, %scompressed size %d\n", index, ++ TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2, + compressed ? "" : "un", length); + } ++ if (length < 0 || length > output->length || ++ (index + length) > msblk->bytes_used) { ++ res = -EIO; ++ goto out; ++ } ++ + if (next_index) + *next_index = index + length; + +diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c +index ae2c87bb0fbec..eb02072d28dd6 100644 +--- a/fs/squashfs/export.c ++++ b/fs/squashfs/export.c +@@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) + struct squashfs_sb_info *msblk = sb->s_fs_info; + int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); + int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); +- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); ++ u64 start; + __le64 ino; + int err; + + TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); + ++ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes) ++ return -EINVAL; ++ ++ start = le64_to_cpu(msblk->inode_lookup_table[blk]); ++ + err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); + if (err < 0) + return err; +@@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + u64 lookup_table_start, u64 next_table, unsigned int inodes) + { + unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); ++ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_inode_lookup_table, length %d\n", length); + +@@ -121,20 +129,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + if (inodes == 0) + return ERR_PTR(-EINVAL); + +- /* length bytes should not extend into the next table - this check +- * also traps instances where lookup_table_start is incorrectly larger +- * than the next table start ++ /* ++ * The computed size of the lookup table (length bytes) should exactly ++ * match the table start and end points + */ +- if (lookup_table_start + length > next_table) ++ if (length != (next_table - lookup_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, lookup_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] 
points to the first inode lookup table metadata block, +- * this should be less than lookup_table_start ++ * table0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed inode lookup blocks. Each entry should be ++ * less than the next (i.e. table[0] < table[1]), and the difference ++ * between them should be SQUASHFS_METADATA_SIZE or less. ++ * table[indexes - 1] should be less than lookup_table_start, and ++ * again the difference should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c +index 6be5afe7287d6..11581bf31af41 100644 +--- a/fs/squashfs/id.c ++++ b/fs/squashfs/id.c +@@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_ID_BLOCK(index); + int offset = SQUASHFS_ID_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->id_table[block]); ++ u64 start_block; + __le32 disk_id; + int err; + ++ if (index >= msblk->ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->id_table[block]); ++ + err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, + sizeof(disk_id)); + if (err < 0) +@@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + u64 id_table_start, u64 next_table, unsigned short no_ids) + { + unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); ++ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_id_index_table, length %d\n", length); + +@@ -67,20 +75,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + return ERR_PTR(-EINVAL); + + /* +- * length bytes should not extend into the next table - this check +- * also traps instances where id_table_start is incorrectly larger +- * than the next table start ++ * The computed size of the index table (length bytes) should exactly ++ * match the table start and end points + */ +- if (id_table_start + length > next_table) ++ if (length != (next_table - id_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, id_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first id lookup table metadata block, this +- * should be less than id_table_start ++ * table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. 
table[indexes - 1] ++ * should be less than id_table_start, and again the difference ++ * should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h +index 34c21ffb6df37..166e98806265b 100644 +--- a/fs/squashfs/squashfs_fs_sb.h ++++ b/fs/squashfs/squashfs_fs_sb.h +@@ -64,5 +64,6 @@ struct squashfs_sb_info { + unsigned int inodes; + unsigned int fragments; + int xattr_ids; ++ unsigned int ids; + }; + #endif +diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c +index d6c6593ec169e..88cc94be10765 100644 +--- a/fs/squashfs/super.c ++++ b/fs/squashfs/super.c +@@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + msblk->directory_table = le64_to_cpu(sblk->directory_table_start); + msblk->inodes = le32_to_cpu(sblk->inodes); + msblk->fragments = le32_to_cpu(sblk->fragments); ++ msblk->ids = le16_to_cpu(sblk->no_ids); + flags = le16_to_cpu(sblk->flags); + + TRACE("Found valid superblock on %pg\n", sb->s_bdev); +@@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + TRACE("Block size %d\n", msblk->block_size); + TRACE("Number of inodes %d\n", msblk->inodes); + TRACE("Number of fragments %d\n", msblk->fragments); +- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); ++ TRACE("Number of ids %d\n", msblk->ids); + TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); + TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); + TRACE("sblk->fragment_table_start %llx\n", +@@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + allocate_id_index_table: + /* Allocate and read id index table */ + msblk->id_table = squashfs_read_id_index_table(sb, +- le64_to_cpu(sblk->id_table_start), next_table, +- le16_to_cpu(sblk->no_ids)); ++ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids); + if (IS_ERR(msblk->id_table)) { + errorf(fc, "unable to read id index table"); + err = PTR_ERR(msblk->id_table); +diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h +index 184129afd4566..d8a270d3ac4cb 100644 +--- a/fs/squashfs/xattr.h ++++ b/fs/squashfs/xattr.h +@@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, + static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, + u64 start, u64 *xattr_table_start, int *xattr_ids) + { ++ struct squashfs_xattr_id_table *id_table; ++ ++ id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ if (IS_ERR(id_table)) ++ return (__le64 *) id_table; ++ ++ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start); ++ kfree(id_table); ++ + ERROR("Xattrs in filesystem, these will be ignored\n"); +- *xattr_table_start = start; + return ERR_PTR(-ENOTSUPP); + } + +diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c +index d99e08464554f..ead66670b41a5 100644 +--- a/fs/squashfs/xattr_id.c ++++ b/fs/squashfs/xattr_id.c +@@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + struct 
squashfs_sb_info *msblk = sb->s_fs_info;
+ 	int block = SQUASHFS_XATTR_BLOCK(index);
+ 	int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
+-	u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
++	u64 start_block;
+ 	struct squashfs_xattr_id id;
+ 	int err;
+ 
++	if (index >= msblk->xattr_ids)
++		return -EINVAL;
++
++	start_block = le64_to_cpu(msblk->xattr_id_table[block]);
++
+ 	err = squashfs_read_metadata(sb, &id, &start_block, &offset,
+ 			sizeof(id));
+ 	if (err < 0)
+@@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
+ /*
+  * Read uncompressed xattr id lookup table indexes from disk into memory
+  */
+-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
++__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+ 		u64 *xattr_table_start, int *xattr_ids)
+ {
+-	unsigned int len;
++	struct squashfs_sb_info *msblk = sb->s_fs_info;
++	unsigned int len, indexes;
+ 	struct squashfs_xattr_id_table *id_table;
++	__le64 *table;
++	u64 start, end;
++	int n;
+ 
+-	id_table = squashfs_read_table(sb, start, sizeof(*id_table));
++	id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
+ 	if (IS_ERR(id_table))
+ 		return (__le64 *) id_table;
+ 
+@@ -70,13 +79,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
+ 	if (*xattr_ids == 0)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	/* xattr_table should be less than start */
+-	if (*xattr_table_start >= start)
++	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
++	indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
++
++	/*
++	 * The computed size of the index table (len bytes) should exactly
++	 * match the table start and end points
++	 */
++	start = table_start + sizeof(*id_table);
++	end = msblk->bytes_used;
++
++	if (len != (end - start))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
++	table = squashfs_read_table(sb, start, len);
++	if (IS_ERR(table))
++		return table;
++
++	/* table[0], table[1], ... table[indexes - 1] store the locations
++	 * of the compressed xattr id blocks. Each entry should be less than
++	 * the next (i.e. table[0] < table[1]), and the difference between them
++	 * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
++	 * should be less than table_start, and again the difference
++	 * should be SQUASHFS_METADATA_SIZE or less.
++	 *
++	 * Finally xattr_table_start should be less than table[0].
++	 */
++	for (n = 0; n < (indexes - 1); n++) {
++		start = le64_to_cpu(table[n]);
++		end = le64_to_cpu(table[n + 1]);
++
++		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
++			kfree(table);
++			return ERR_PTR(-EINVAL);
++		}
++	}
++
++	start = le64_to_cpu(table[indexes - 1]);
++	if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
++		kfree(table);
++		return ERR_PTR(-EINVAL);
++	}
+ 
+-	TRACE("In read_xattr_index_table, length %d\n", len);
++	if (*xattr_table_start >= le64_to_cpu(table[0])) {
++		kfree(table);
++		return ERR_PTR(-EINVAL);
++	}
+ 
+-	return squashfs_read_table(sb, start + sizeof(*id_table), len);
++	return table;
+ }
+diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
+index 9548d075e06da..b998e4b736912 100644
+--- a/include/linux/sunrpc/xdr.h
++++ b/include/linux/sunrpc/xdr.h
+@@ -25,8 +25,7 @@ struct rpc_rqst;
+ #define XDR_QUADLEN(l)		(((l) + 3) >> 2)
+ 
+ /*
+- * Generic opaque `network object.' At the kernel level, this type
+- * is used only by lockd.
++ * Generic opaque `network object.' 
+ */ + #define XDR_MAX_NETOBJ 1024 + struct xdr_netobj { +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 618cb1b451ade..8c017f8c0c6d6 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -6822,7 +6822,7 @@ static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) + case BPF_JSGT: + if (reg->s32_min_value > sval) + return 1; +- else if (reg->s32_max_value < sval) ++ else if (reg->s32_max_value <= sval) + return 0; + break; + case BPF_JLT: +@@ -6895,7 +6895,7 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) + case BPF_JSGT: + if (reg->smin_value > sval) + return 1; +- else if (reg->smax_value < sval) ++ else if (reg->smax_value <= sval) + return 0; + break; + case BPF_JLT: +@@ -8465,7 +8465,11 @@ static bool range_within(struct bpf_reg_state *old, + return old->umin_value <= cur->umin_value && + old->umax_value >= cur->umax_value && + old->smin_value <= cur->smin_value && +- old->smax_value >= cur->smax_value; ++ old->smax_value >= cur->smax_value && ++ old->u32_min_value <= cur->u32_min_value && ++ old->u32_max_value >= cur->u32_max_value && ++ old->s32_min_value <= cur->s32_min_value && ++ old->s32_max_value >= cur->s32_max_value; + } + + /* Maximum number of register states that can exist at once */ +@@ -10862,30 +10866,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) + insn->code == (BPF_ALU | BPF_MOD | BPF_X) || + insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; +- struct bpf_insn mask_and_div[] = { +- BPF_MOV32_REG(insn->src_reg, insn->src_reg), ++ bool isdiv = BPF_OP(insn->code) == BPF_DIV; ++ struct bpf_insn *patchlet; ++ struct bpf_insn chk_and_div[] = { + /* Rx div 0 -> 0 */ +- BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JNE | BPF_K, insn->src_reg, ++ 0, 2, 0), + BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + *insn, + }; +- struct bpf_insn mask_and_mod[] = { +- BPF_MOV32_REG(insn->src_reg, insn->src_reg), ++ struct bpf_insn chk_and_mod[] = { + /* Rx mod 0 -> Rx */ +- BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JEQ | BPF_K, insn->src_reg, ++ 0, 1, 0), + *insn, + }; +- struct bpf_insn *patchlet; + +- if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || +- insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { +- patchlet = mask_and_div + (is64 ? 1 : 0); +- cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); +- } else { +- patchlet = mask_and_mod + (is64 ? 1 : 0); +- cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); +- } ++ patchlet = isdiv ? chk_and_div : chk_and_mod; ++ cnt = isdiv ? 
ARRAY_SIZE(chk_and_div) : ++ ARRAY_SIZE(chk_and_mod); + + new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); + if (!new_prog) +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 8fc23d53f5500..a604e69ecfa57 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -6320,6 +6320,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, + if (err) + return err; + ++ page_counter_set_high(&memcg->memory, high); ++ + for (;;) { + unsigned long nr_pages = page_counter_read(&memcg->memory); + unsigned long reclaimed; +@@ -6343,10 +6345,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, + break; + } + +- page_counter_set_high(&memcg->memory, high); +- + memcg_wb_domain_size_changed(memcg); +- + return nbytes; + } + +diff --git a/net/key/af_key.c b/net/key/af_key.c +index c12dbc51ef5fe..ef9b4ac03e7b7 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -2902,7 +2902,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) + break; + if (!aalg->pfkey_supported) + continue; +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + return sz + sizeof(struct sadb_prop); +@@ -2920,7 +2920,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!ealg->pfkey_supported) + continue; + +- if (!(ealg_tmpl_set(t, ealg) && ealg->available)) ++ if (!(ealg_tmpl_set(t, ealg))) + continue; + + for (k = 1; ; k++) { +@@ -2931,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!aalg->pfkey_supported) + continue; + +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + } +diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c +index ae1cb2c687224..76747bfdaddd0 100644 +--- a/net/mac80211/spectmgmt.c ++++ b/net/mac80211/spectmgmt.c +@@ -133,16 +133,20 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, + } + + if (wide_bw_chansw_ie) { ++ u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1; + struct ieee80211_vht_operation vht_oper = { + .chan_width = + wide_bw_chansw_ie->new_channel_width, + .center_freq_seg0_idx = + wide_bw_chansw_ie->new_center_freq_seg0, +- .center_freq_seg1_idx = +- wide_bw_chansw_ie->new_center_freq_seg1, ++ .center_freq_seg1_idx = new_seg1, + /* .basic_mcs_set doesn't matter */ + }; +- struct ieee80211_ht_operation ht_oper = {}; ++ struct ieee80211_ht_operation ht_oper = { ++ .operation_mode = ++ cpu_to_le16(new_seg1 << ++ IEEE80211_HT_OP_MODE_CCFS2_SHIFT), ++ }; + + /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT, + * to the previously parsed chandef +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 4ecc2a9595674..5f42aa5fc6128 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -29,6 +29,7 @@ + #include + #include + ++#include "auth_gss_internal.h" + #include "../netns.h" + + #include +@@ -125,35 +126,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, size_t len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static inline const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, 
sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- dest->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(dest->data == NULL)) +- return ERR_PTR(-ENOMEM); +- dest->len = len; +- return q; +-} +- + static struct gss_cl_ctx * + gss_cred_get_ctx(struct rpc_cred *cred) + { +diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h +new file mode 100644 +index 0000000000000..f6d9631bd9d00 +--- /dev/null ++++ b/net/sunrpc/auth_gss/auth_gss_internal.h +@@ -0,0 +1,45 @@ ++// SPDX-License-Identifier: BSD-3-Clause ++/* ++ * linux/net/sunrpc/auth_gss/auth_gss_internal.h ++ * ++ * Internal definitions for RPCSEC_GSS client authentication ++ * ++ * Copyright (c) 2000 The Regents of the University of Michigan. ++ * All rights reserved. ++ * ++ */ ++#include ++#include ++#include ++ ++static inline const void * ++simple_get_bytes(const void *p, const void *end, void *res, size_t len) ++{ ++ const void *q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ memcpy(res, p, len); ++ return q; ++} ++ ++static inline const void * ++simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) ++{ ++ const void *q; ++ unsigned int len; ++ ++ p = simple_get_bytes(p, end, &len, sizeof(len)); ++ if (IS_ERR(p)) ++ return p; ++ q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ if (len) { ++ dest->data = kmemdup(p, len, GFP_NOFS); ++ if (unlikely(dest->data == NULL)) ++ return ERR_PTR(-ENOMEM); ++ } else ++ dest->data = NULL; ++ dest->len = len; ++ return q; ++} +diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c +index ae9acf3a73898..1c092b05c2bba 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_mech.c ++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c +@@ -21,6 +21,8 @@ + #include + #include + ++#include "auth_gss_internal.h" ++ + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + # define RPCDBG_FACILITY RPCDBG_AUTH + #endif +@@ -143,35 +145,6 @@ get_gss_krb5_enctype(int etype) + return NULL; + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, int len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- res->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(res->data == NULL)) +- return ERR_PTR(-ENOMEM); +- res->len = len; +- return q; +-} +- + static inline const void * + get_key(const void *p, const void *end, + struct krb5_ctx *ctx, struct crypto_sync_skcipher **res) +diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c +index 1c5114dedda92..fe49e9a97f0ec 100644 +--- a/sound/hda/intel-dsp-config.c ++++ b/sound/hda/intel-dsp-config.c +@@ -306,6 +306,10 @@ static const struct config_entry config_table[] = { + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, + .device = 0xa0c8, + }, ++ { ++ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, ++ .device = 0x43c8, ++ }, + #endif + + /* Elkhart Lake */ +diff --git 
a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c +index 1010c9ee2e836..472caad17012e 100644 +--- a/sound/soc/codecs/ak4458.c ++++ b/sound/soc/codecs/ak4458.c +@@ -595,18 +595,10 @@ static struct snd_soc_dai_driver ak4497_dai = { + .ops = &ak4458_dai_ops, + }; + +-static void ak4458_power_off(struct ak4458_priv *ak4458) ++static void ak4458_reset(struct ak4458_priv *ak4458, bool active) + { + if (ak4458->reset_gpiod) { +- gpiod_set_value_cansleep(ak4458->reset_gpiod, 0); +- usleep_range(1000, 2000); +- } +-} +- +-static void ak4458_power_on(struct ak4458_priv *ak4458) +-{ +- if (ak4458->reset_gpiod) { +- gpiod_set_value_cansleep(ak4458->reset_gpiod, 1); ++ gpiod_set_value_cansleep(ak4458->reset_gpiod, active); + usleep_range(1000, 2000); + } + } +@@ -620,7 +612,7 @@ static int ak4458_init(struct snd_soc_component *component) + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); + +- ak4458_power_on(ak4458); ++ ak4458_reset(ak4458, false); + + ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1, + 0x80, 0x80); /* ACKS bit = 1; 10000000 */ +@@ -650,7 +642,7 @@ static void ak4458_remove(struct snd_soc_component *component) + { + struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component); + +- ak4458_power_off(ak4458); ++ ak4458_reset(ak4458, true); + } + + #ifdef CONFIG_PM +@@ -660,7 +652,7 @@ static int __maybe_unused ak4458_runtime_suspend(struct device *dev) + + regcache_cache_only(ak4458->regmap, true); + +- ak4458_power_off(ak4458); ++ ak4458_reset(ak4458, true); + + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 0); +@@ -685,8 +677,8 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev) + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); + +- ak4458_power_off(ak4458); +- ak4458_power_on(ak4458); ++ ak4458_reset(ak4458, true); ++ ak4458_reset(ak4458, false); + + regcache_cache_only(ak4458->regmap, false); + regcache_mark_dirty(ak4458->regmap); +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c +index dec8716aa8ef5..985b2dcecf138 100644 +--- a/sound/soc/codecs/wm_adsp.c ++++ b/sound/soc/codecs/wm_adsp.c +@@ -2031,11 +2031,14 @@ static struct wm_coeff_ctl *wm_adsp_get_ctl(struct wm_adsp *dsp, + unsigned int alg) + { + struct wm_coeff_ctl *pos, *rslt = NULL; ++ const char *fw_txt = wm_adsp_fw_text[dsp->fw]; + + list_for_each_entry(pos, &dsp->ctl_list, list) { + if (!pos->subname) + continue; + if (strncmp(pos->subname, name, pos->subname_len) == 0 && ++ strncmp(pos->fw_name, fw_txt, ++ SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0 && + pos->alg_region.alg == alg && + pos->alg_region.type == type) { + rslt = pos; +diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c +index b29946eb43551..a8d43c87cb5a2 100644 +--- a/sound/soc/intel/boards/sof_sdw.c ++++ b/sound/soc/intel/boards/sof_sdw.c +@@ -57,6 +57,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { + .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 | + SOF_RT715_DAI_ID_FIX), + }, ++ { ++ .callback = sof_sdw_quirk_cb, ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E") ++ }, ++ .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 | ++ SOF_RT715_DAI_ID_FIX | ++ SOF_SDW_FOUR_SPK), ++ }, + { + .callback = sof_sdw_quirk_cb, + .matches = { +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index d699e61eca3d0..0955cbb4e9187 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ 
b/sound/soc/intel/skylake/skl-topology.c +@@ -3632,7 +3632,7 @@ static void skl_tplg_complete(struct snd_soc_component *component) + sprintf(chan_text, "c%d", mach->mach_params.dmic_num); + + for (i = 0; i < se->items; i++) { +- struct snd_ctl_elem_value val; ++ struct snd_ctl_elem_value val = {}; + + if (strstr(texts[i], chan_text)) { + val.value.enumerated.item[0] = i; From d5917f2379ebe3e9c1b778895928b85e57244fde Mon Sep 17 00:00:00 2001 From: Igor Pecovnik Date: Sun, 14 Feb 2021 15:57:06 +0100 Subject: [PATCH 7/7] Odroidxu4 - upstream patch --- config/kernel/linux-odroidxu4-current.config | 2 +- .../odroidxu4-current/patch-5.4.97-98.patch | 1203 +++++++++++++++++ 2 files changed, 1204 insertions(+), 1 deletion(-) create mode 100644 patch/kernel/odroidxu4-current/patch-5.4.97-98.patch diff --git a/config/kernel/linux-odroidxu4-current.config b/config/kernel/linux-odroidxu4-current.config index 7a52e063b..96d6b64cf 100644 --- a/config/kernel/linux-odroidxu4-current.config +++ b/config/kernel/linux-odroidxu4-current.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 5.4.97 Kernel Configuration +# Linux/arm 5.4.98 Kernel Configuration # # diff --git a/patch/kernel/odroidxu4-current/patch-5.4.97-98.patch b/patch/kernel/odroidxu4-current/patch-5.4.97-98.patch new file mode 100644 index 000000000..6e7c8524e --- /dev/null +++ b/patch/kernel/odroidxu4-current/patch-5.4.97-98.patch @@ -0,0 +1,1203 @@ +diff --git a/Makefile b/Makefile +index 032751f6be0c1..4f6bfcf434e80 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 97 ++SUBLEVEL = 98 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 4906e480b5bb6..296b0d7570d06 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -1835,6 +1835,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, + struct page **pages; + unsigned long first, last; + ++ lockdep_assert_held(&kvm->lock); ++ + if (ulen == 0 || uaddr + ulen < uaddr) + return NULL; + +@@ -7091,12 +7093,21 @@ static int svm_register_enc_region(struct kvm *kvm, + if (!region) + return -ENOMEM; + ++ mutex_lock(&kvm->lock); + region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); + if (!region->pages) { + ret = -ENOMEM; ++ mutex_unlock(&kvm->lock); + goto e_free; + } + ++ region->uaddr = range->addr; ++ region->size = range->size; ++ ++ mutex_lock(&kvm->lock); ++ list_add_tail(®ion->list, &sev->regions_list); ++ mutex_unlock(&kvm->lock); ++ + /* + * The guest may change the memory encryption attribute from C=0 -> C=1 + * or vice versa for this memory range. 
Lets make sure caches are +@@ -7105,13 +7116,6 @@ static int svm_register_enc_region(struct kvm *kvm, + */ + sev_clflush_pages(region->pages, region->npages); + +- region->uaddr = range->addr; +- region->size = range->size; +- +- mutex_lock(&kvm->lock); +- list_add_tail(®ion->list, &sev->regions_list); +- mutex_unlock(&kvm->lock); +- + return ret; + + e_free: +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index 3d34ac02d76ef..cb3d44d200055 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -1089,6 +1089,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css) + */ + void blkcg_destroy_blkgs(struct blkcg *blkcg) + { ++ might_sleep(); ++ + spin_lock_irq(&blkcg->lock); + + while (!hlist_empty(&blkcg->blkg_list)) { +@@ -1096,14 +1098,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg) + struct blkcg_gq, blkcg_node); + struct request_queue *q = blkg->q; + +- if (spin_trylock(&q->queue_lock)) { +- blkg_destroy(blkg); +- spin_unlock(&q->queue_lock); +- } else { ++ if (need_resched() || !spin_trylock(&q->queue_lock)) { ++ /* ++ * Given that the system can accumulate a huge number ++ * of blkgs in pathological cases, check to see if we ++ * need to rescheduling to avoid softlockup. ++ */ + spin_unlock_irq(&blkcg->lock); +- cpu_relax(); ++ cond_resched(); + spin_lock_irq(&blkcg->lock); ++ continue; + } ++ ++ blkg_destroy(blkg); ++ spin_unlock(&q->queue_lock); + } + + spin_unlock_irq(&blkcg->lock); +diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c +index eddc6d1bdb2d1..82b76df43ae57 100644 +--- a/drivers/crypto/chelsio/chtls/chtls_cm.c ++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c +@@ -1047,11 +1047,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + + n = dst_neigh_lookup(dst, &iph->saddr); + if (!n || !n->dev) +- goto free_sk; ++ goto free_dst; + + ndev = n->dev; +- if (!ndev) +- goto free_dst; + if (is_vlan_dev(ndev)) + ndev = vlan_dev_real_dev(ndev); + +@@ -1117,7 +1115,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, + free_csk: + chtls_sock_release(&csk->kref); + free_dst: +- neigh_release(n); ++ if (n) ++ neigh_release(n); + dst_release(dst); + free_sk: + inet_csk_prepare_forced_close(newsk); +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c +index 5a9f0d17f52c8..e1ef0122ef759 100644 +--- a/drivers/i2c/busses/i2c-mt65xx.c ++++ b/drivers/i2c/busses/i2c-mt65xx.c +@@ -1008,7 +1008,8 @@ static int mtk_i2c_probe(struct platform_device *pdev) + mtk_i2c_clock_disable(i2c); + + ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq, +- IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c); ++ IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, ++ I2C_DRV_NAME, i2c); + if (ret < 0) { + dev_err(&pdev->dev, + "Request I2C IRQ %d fail\n", irq); +@@ -1035,7 +1036,16 @@ static int mtk_i2c_remove(struct platform_device *pdev) + } + + #ifdef CONFIG_PM_SLEEP +-static int mtk_i2c_resume(struct device *dev) ++static int mtk_i2c_suspend_noirq(struct device *dev) ++{ ++ struct mtk_i2c *i2c = dev_get_drvdata(dev); ++ ++ i2c_mark_adapter_suspended(&i2c->adap); ++ ++ return 0; ++} ++ ++static int mtk_i2c_resume_noirq(struct device *dev) + { + int ret; + struct mtk_i2c *i2c = dev_get_drvdata(dev); +@@ -1050,12 +1060,15 @@ static int mtk_i2c_resume(struct device *dev) + + mtk_i2c_clock_disable(i2c); + ++ i2c_mark_adapter_resumed(&i2c->adap); ++ + return 0; + } + #endif + + static const struct dev_pm_ops mtk_i2c_pm = { +- SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume) ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq, ++ 
mtk_i2c_resume_noirq) + }; + + static struct platform_driver mtk_i2c_driver = { +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +index f043eefabb4ec..7b1d2dac6ceb8 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +@@ -514,7 +514,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, + const size_t bufsz = sizeof(buf); + int pos = 0; + ++ mutex_lock(&mvm->mutex); + iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); ++ mutex_unlock(&mvm->mutex); ++ + do_div(curr_os, NSEC_PER_USEC); + diff = curr_os - curr_gp2; + pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index daae86cd61140..fc6430edd1107 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -4169,6 +4169,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, + iwl_mvm_binding_remove_vif(mvm, vif); + + out: ++ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && ++ switching_chanctx) ++ return; + mvmvif->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); + } +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index b04cc6214bac8..8b0576cde797e 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -838,6 +838,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, + if (!mvm->scan_cmd) + goto out_free; + ++ /* invalidate ids to prevent accidental removal of sta_id 0 */ ++ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; ++ mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; ++ + /* Set EBS as successful as long as not stated otherwise by the FW. 
*/ + mvm->last_ebs_successful = true; + +@@ -1238,6 +1242,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) + reprobe = container_of(wk, struct iwl_mvm_reprobe, work); + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); ++ put_device(reprobe->dev); + kfree(reprobe); + module_put(THIS_MODULE); + } +@@ -1288,7 +1293,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) + module_put(THIS_MODULE); + return; + } +- reprobe->dev = mvm->trans->dev; ++ reprobe->dev = get_device(mvm->trans->dev); + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); + schedule_work(&reprobe->work); + } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +index a36aa9e85e0b3..40cafcf40ccf0 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +@@ -2070,6 +2070,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) + + lockdep_assert_held(&mvm->mutex); + ++ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) ++ return -EINVAL; ++ + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); + if (ret) +@@ -2084,6 +2087,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) + + lockdep_assert_held(&mvm->mutex); + ++ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) ++ return -EINVAL; ++ + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); + if (ret) +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +index 7a5b024a6d384..eab159205e48b 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +@@ -164,8 +164,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + /* Allocate IML */ + iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, + &trans_pcie->iml_dma_addr, GFP_KERNEL); +- if (!iml_img) +- return -ENOMEM; ++ if (!iml_img) { ++ ret = -ENOMEM; ++ goto err_free_ctxt_info; ++ } + + memcpy(iml_img, trans->iml, trans->iml_len); + +@@ -207,6 +209,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + + return 0; + ++err_free_ctxt_info: ++ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), ++ trans_pcie->ctxt_info_gen3, ++ trans_pcie->ctxt_info_dma_addr); ++ trans_pcie->ctxt_info_gen3 = NULL; + err_free_prph_info: + dma_free_coherent(trans->dev, + sizeof(*prph_info), +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +index d3b58334e13ea..e7dcf8bc99b7c 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +@@ -657,6 +657,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[txq_id]; + ++ if (!txq) { ++ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); ++ return; ++ } ++ + spin_lock_bh(&txq->lock); + while (txq->write_ptr != txq->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index c9b8613e69db2..5b9d570df85cc 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -1772,13 +1772,13 @@ 
static int regulator_resolve_supply(struct regulator_dev *rdev) + { + struct regulator_dev *r; + struct device *dev = rdev->dev.parent; +- int ret; ++ int ret = 0; + + /* No supply to resolve? */ + if (!rdev->supply_name) + return 0; + +- /* Supply already resolved? */ ++ /* Supply already resolved? (fast-path without locking contention) */ + if (rdev->supply) + return 0; + +@@ -1788,7 +1788,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + + /* Did the lookup explicitly defer for us? */ + if (ret == -EPROBE_DEFER) +- return ret; ++ goto out; + + if (have_full_constraints()) { + r = dummy_regulator_rdev; +@@ -1796,15 +1796,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + } else { + dev_err(dev, "Failed to resolve %s-supply for %s\n", + rdev->supply_name, rdev->desc->name); +- return -EPROBE_DEFER; ++ ret = -EPROBE_DEFER; ++ goto out; + } + } + + if (r == rdev) { + dev_err(dev, "Supply for %s (%s) resolved to itself\n", + rdev->desc->name, rdev->supply_name); +- if (!have_full_constraints()) +- return -EINVAL; ++ if (!have_full_constraints()) { ++ ret = -EINVAL; ++ goto out; ++ } + r = dummy_regulator_rdev; + get_device(&r->dev); + } +@@ -1818,7 +1821,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + if (r->dev.parent && r->dev.parent != rdev->dev.parent) { + if (!device_is_bound(r->dev.parent)) { + put_device(&r->dev); +- return -EPROBE_DEFER; ++ ret = -EPROBE_DEFER; ++ goto out; + } + } + +@@ -1826,15 +1830,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + ret = regulator_resolve_supply(r); + if (ret < 0) { + put_device(&r->dev); +- return ret; ++ goto out; ++ } ++ ++ /* ++ * Recheck rdev->supply with rdev->mutex lock held to avoid a race ++ * between rdev->supply null check and setting rdev->supply in ++ * set_supply() from concurrent tasks. ++ */ ++ regulator_lock(rdev); ++ ++ /* Supply just resolved by a concurrent task? */ ++ if (rdev->supply) { ++ regulator_unlock(rdev); ++ put_device(&r->dev); ++ goto out; + } + + ret = set_supply(rdev, r); + if (ret < 0) { ++ regulator_unlock(rdev); + put_device(&r->dev); +- return ret; ++ goto out; + } + ++ regulator_unlock(rdev); ++ + /* + * In set_machine_constraints() we may have turned this regulator on + * but we couldn't propagate to the supply if it hadn't been resolved +@@ -1845,11 +1866,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + if (ret < 0) { + _regulator_put(rdev->supply); + rdev->supply = NULL; +- return ret; ++ goto out; + } + } + +- return 0; ++out: ++ return ret; + } + + /* Internal regulator request function */ +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index ca1d98f274d12..e3a79e6958124 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -2369,7 +2369,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) + * We got an entirely new state ID. 
Mark all segments for the
+ 		 * inode invalid, and retry the layoutget
+ 		 */
+-		pnfs_mark_layout_stateid_invalid(lo, &free_me);
++		struct pnfs_layout_range range = {
++			.iomode = IOMODE_ANY,
++			.length = NFS4_MAX_UINT64,
++		};
++		pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
++		pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
++						&range, 0);
+ 		goto out_forget;
+ 	}
+ 
+diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
+index ae2c87bb0fbec..eb02072d28dd6 100644
+--- a/fs/squashfs/export.c
++++ b/fs/squashfs/export.c
+@@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
+ 	struct squashfs_sb_info *msblk = sb->s_fs_info;
+ 	int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
+ 	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
+-	u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
++	u64 start;
+ 	__le64 ino;
+ 	int err;
+ 
+ 	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
+ 
++	if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
++		return -EINVAL;
++
++	start = le64_to_cpu(msblk->inode_lookup_table[blk]);
++
+ 	err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
+ 	if (err < 0)
+ 		return err;
+@@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+ 		u64 lookup_table_start, u64 next_table, unsigned int inodes)
+ {
+ 	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
++	unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
++	int n;
+ 	__le64 *table;
++	u64 start, end;
+ 
+ 	TRACE("In read_inode_lookup_table, length %d\n", length);
+ 
+@@ -121,20 +129,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+ 	if (inodes == 0)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	/* length bytes should not extend into the next table - this check
+-	 * also traps instances where lookup_table_start is incorrectly larger
+-	 * than the next table start
++	/*
++	 * The computed size of the lookup table (length bytes) should exactly
++	 * match the table start and end points
+ 	 */
+-	if (lookup_table_start + length > next_table)
++	if (length != (next_table - lookup_table_start))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	table = squashfs_read_table(sb, lookup_table_start, length);
++	if (IS_ERR(table))
++		return table;
+ 
+ 	/*
+-	 * table[0] points to the first inode lookup table metadata block,
+-	 * this should be less than lookup_table_start
++	 * table[0], table[1], ... table[indexes - 1] store the locations
++	 * of the compressed inode lookup blocks. Each entry should be
++	 * less than the next (i.e. table[0] < table[1]), and the difference
++	 * between them should be SQUASHFS_METADATA_SIZE or less. 
++ * table[indexes - 1] should be less than lookup_table_start, and ++ * again the difference should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c +index 6be5afe7287d6..11581bf31af41 100644 +--- a/fs/squashfs/id.c ++++ b/fs/squashfs/id.c +@@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_ID_BLOCK(index); + int offset = SQUASHFS_ID_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->id_table[block]); ++ u64 start_block; + __le32 disk_id; + int err; + ++ if (index >= msblk->ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->id_table[block]); ++ + err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, + sizeof(disk_id)); + if (err < 0) +@@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + u64 id_table_start, u64 next_table, unsigned short no_ids) + { + unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); ++ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_id_index_table, length %d\n", length); + +@@ -67,20 +75,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + return ERR_PTR(-EINVAL); + + /* +- * length bytes should not extend into the next table - this check +- * also traps instances where id_table_start is incorrectly larger +- * than the next table start ++ * The computed size of the index table (length bytes) should exactly ++ * match the table start and end points + */ +- if (id_table_start + length > next_table) ++ if (length != (next_table - id_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, id_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first id lookup table metadata block, this +- * should be less than id_table_start ++ * table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. 
table[indexes - 1] ++ * should be less than id_table_start, and again the difference ++ * should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h +index 34c21ffb6df37..166e98806265b 100644 +--- a/fs/squashfs/squashfs_fs_sb.h ++++ b/fs/squashfs/squashfs_fs_sb.h +@@ -64,5 +64,6 @@ struct squashfs_sb_info { + unsigned int inodes; + unsigned int fragments; + int xattr_ids; ++ unsigned int ids; + }; + #endif +diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c +index 0cc4ceec05624..2110323b610b9 100644 +--- a/fs/squashfs/super.c ++++ b/fs/squashfs/super.c +@@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + msblk->directory_table = le64_to_cpu(sblk->directory_table_start); + msblk->inodes = le32_to_cpu(sblk->inodes); + msblk->fragments = le32_to_cpu(sblk->fragments); ++ msblk->ids = le16_to_cpu(sblk->no_ids); + flags = le16_to_cpu(sblk->flags); + + TRACE("Found valid superblock on %pg\n", sb->s_bdev); +@@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + TRACE("Block size %d\n", msblk->block_size); + TRACE("Number of inodes %d\n", msblk->inodes); + TRACE("Number of fragments %d\n", msblk->fragments); +- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); ++ TRACE("Number of ids %d\n", msblk->ids); + TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); + TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); + TRACE("sblk->fragment_table_start %llx\n", +@@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) + allocate_id_index_table: + /* Allocate and read id index table */ + msblk->id_table = squashfs_read_id_index_table(sb, +- le64_to_cpu(sblk->id_table_start), next_table, +- le16_to_cpu(sblk->no_ids)); ++ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids); + if (IS_ERR(msblk->id_table)) { + errorf(fc, "unable to read id index table"); + err = PTR_ERR(msblk->id_table); +diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h +index 184129afd4566..d8a270d3ac4cb 100644 +--- a/fs/squashfs/xattr.h ++++ b/fs/squashfs/xattr.h +@@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, + static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, + u64 start, u64 *xattr_table_start, int *xattr_ids) + { ++ struct squashfs_xattr_id_table *id_table; ++ ++ id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ if (IS_ERR(id_table)) ++ return (__le64 *) id_table; ++ ++ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start); ++ kfree(id_table); ++ + ERROR("Xattrs in filesystem, these will be ignored\n"); +- *xattr_table_start = start; + return ERR_PTR(-ENOTSUPP); + } + +diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c +index d99e08464554f..ead66670b41a5 100644 +--- a/fs/squashfs/xattr_id.c ++++ b/fs/squashfs/xattr_id.c +@@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + struct 
squashfs_sb_info *msblk = sb->s_fs_info;
+ 	int block = SQUASHFS_XATTR_BLOCK(index);
+ 	int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
+-	u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
++	u64 start_block;
+ 	struct squashfs_xattr_id id;
+ 	int err;
+ 
++	if (index >= msblk->xattr_ids)
++		return -EINVAL;
++
++	start_block = le64_to_cpu(msblk->xattr_id_table[block]);
++
+ 	err = squashfs_read_metadata(sb, &id, &start_block, &offset,
+ 			sizeof(id));
+ 	if (err < 0)
+@@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
+ /*
+  * Read uncompressed xattr id lookup table indexes from disk into memory
+  */
+-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
++__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+ 		u64 *xattr_table_start, int *xattr_ids)
+ {
+-	unsigned int len;
++	struct squashfs_sb_info *msblk = sb->s_fs_info;
++	unsigned int len, indexes;
+ 	struct squashfs_xattr_id_table *id_table;
++	__le64 *table;
++	u64 start, end;
++	int n;
+ 
+-	id_table = squashfs_read_table(sb, start, sizeof(*id_table));
++	id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
+ 	if (IS_ERR(id_table))
+ 		return (__le64 *) id_table;
+ 
+@@ -70,13 +79,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
+ 	if (*xattr_ids == 0)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	/* xattr_table should be less than start */
+-	if (*xattr_table_start >= start)
++	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
++	indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
++
++	/*
++	 * The computed size of the index table (len bytes) should exactly
++	 * match the table start and end points
++	 */
++	start = table_start + sizeof(*id_table);
++	end = msblk->bytes_used;
++
++	if (len != (end - start))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
++	table = squashfs_read_table(sb, start, len);
++	if (IS_ERR(table))
++		return table;
++
++	/* table[0], table[1], ... table[indexes - 1] store the locations
++	 * of the compressed xattr id blocks. Each entry should be less than
++	 * the next (i.e. table[0] < table[1]), and the difference between them
++	 * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
++	 * should be less than table_start, and again the difference
++	 * should be SQUASHFS_METADATA_SIZE or less.
++	 *
++	 * Finally xattr_table_start should be less than table[0]. 
++ */ ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- TRACE("In read_xattr_index_table, length %d\n", len); ++ if (*xattr_table_start >= le64_to_cpu(table[0])) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- return squashfs_read_table(sb, start + sizeof(*id_table), len); ++ return table; + } +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index a60488867dd06..a121fd8e7c3a0 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -232,7 +232,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); + extern bool arch_within_kprobe_blacklist(unsigned long addr); + extern int arch_populate_kprobe_blacklist(void); + extern bool arch_kprobe_on_func_entry(unsigned long offset); +-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); ++extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); + + extern bool within_kprobe_blacklist(unsigned long addr); + extern int kprobe_add_ksym_blacklist(unsigned long entry); +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h +index 9db6097c22c5d..a8d68c5a4ca61 100644 +--- a/include/linux/sunrpc/xdr.h ++++ b/include/linux/sunrpc/xdr.h +@@ -27,8 +27,7 @@ struct rpc_rqst; + #define XDR_QUADLEN(l) (((l) + 3) >> 2) + + /* +- * Generic opaque `network object.' At the kernel level, this type +- * is used only by lockd. ++ * Generic opaque `network object.' + */ + #define XDR_MAX_NETOBJ 1024 + struct xdr_netobj { +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index a67bfa803d983..2c248c4f6419c 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -9002,30 +9002,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) + insn->code == (BPF_ALU | BPF_MOD | BPF_X) || + insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; +- struct bpf_insn mask_and_div[] = { +- BPF_MOV32_REG(insn->src_reg, insn->src_reg), ++ bool isdiv = BPF_OP(insn->code) == BPF_DIV; ++ struct bpf_insn *patchlet; ++ struct bpf_insn chk_and_div[] = { + /* Rx div 0 -> 0 */ +- BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JNE | BPF_K, insn->src_reg, ++ 0, 2, 0), + BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + *insn, + }; +- struct bpf_insn mask_and_mod[] = { +- BPF_MOV32_REG(insn->src_reg, insn->src_reg), ++ struct bpf_insn chk_and_mod[] = { + /* Rx mod 0 -> Rx */ +- BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JEQ | BPF_K, insn->src_reg, ++ 0, 1, 0), + *insn, + }; +- struct bpf_insn *patchlet; + +- if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || +- insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { +- patchlet = mask_and_div + (is64 ? 1 : 0); +- cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); +- } else { +- patchlet = mask_and_mod + (is64 ? 1 : 0); +- cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0); +- } ++ patchlet = isdiv ? chk_and_div : chk_and_mod; ++ cnt = isdiv ? 
ARRAY_SIZE(chk_and_div) : ++ ARRAY_SIZE(chk_and_mod); + + new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); + if (!new_prog) +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 26ae92c12fc22..a7812c115e487 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1948,29 +1948,45 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset) + return !offset; + } + +-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) ++/** ++ * kprobe_on_func_entry() -- check whether given address is function entry ++ * @addr: Target address ++ * @sym: Target symbol name ++ * @offset: The offset from the symbol or the address ++ * ++ * This checks whether the given @addr+@offset or @sym+@offset is on the ++ * function entry address or not. ++ * This returns 0 if it is the function entry, or -EINVAL if it is not. ++ * And also it returns -ENOENT if it fails the symbol or address lookup. ++ * Caller must pass @addr or @sym (either one must be NULL), or this ++ * returns -EINVAL. ++ */ ++int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) + { + kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); + + if (IS_ERR(kp_addr)) +- return false; ++ return PTR_ERR(kp_addr); + +- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) || +- !arch_kprobe_on_func_entry(offset)) +- return false; ++ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset)) ++ return -ENOENT; + +- return true; ++ if (!arch_kprobe_on_func_entry(offset)) ++ return -EINVAL; ++ ++ return 0; + } + + int register_kretprobe(struct kretprobe *rp) + { +- int ret = 0; ++ int ret; + struct kretprobe_instance *inst; + int i; + void *addr; + +- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset)) +- return -EINVAL; ++ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); ++ if (ret) ++ return ret; + + /* If only rp->kp.addr is specified, check reregistering kprobes */ + if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index 1074a69beff3f..233322c77b76c 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -220,9 +220,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call) + { + struct trace_kprobe *tk = trace_kprobe_primary_from_call(call); + +- return tk ? kprobe_on_func_entry(tk->rp.kp.addr, ++ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr, + tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name, +- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false; ++ tk->rp.kp.addr ? 
0 : tk->rp.kp.offset) == 0) : false; + } + + bool trace_kprobe_error_injectable(struct trace_event_call *call) +@@ -811,9 +811,11 @@ static int trace_kprobe_create(int argc, const char *argv[]) + trace_probe_log_err(0, BAD_PROBE_ADDR); + goto parse_error; + } +- if (kprobe_on_func_entry(NULL, symbol, offset)) ++ ret = kprobe_on_func_entry(NULL, symbol, offset); ++ if (ret == 0) + flags |= TPARG_FL_FENTRY; +- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) { ++ /* Defer the ENOENT case until register kprobe */ ++ if (ret == -EINVAL && is_return) { + trace_probe_log_err(0, BAD_RETPROBE); + goto parse_error; + } +diff --git a/net/key/af_key.c b/net/key/af_key.c +index a915bc86620af..907d04a474597 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -2902,7 +2902,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) + break; + if (!aalg->pfkey_supported) + continue; +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + return sz + sizeof(struct sadb_prop); +@@ -2920,7 +2920,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!ealg->pfkey_supported) + continue; + +- if (!(ealg_tmpl_set(t, ealg) && ealg->available)) ++ if (!(ealg_tmpl_set(t, ealg))) + continue; + + for (k = 1; ; k++) { +@@ -2931,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!aalg->pfkey_supported) + continue; + +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + } +diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c +index 5fe2b645912f6..132f8423addaa 100644 +--- a/net/mac80211/spectmgmt.c ++++ b/net/mac80211/spectmgmt.c +@@ -132,16 +132,20 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, + } + + if (wide_bw_chansw_ie) { ++ u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1; + struct ieee80211_vht_operation vht_oper = { + .chan_width = + wide_bw_chansw_ie->new_channel_width, + .center_freq_seg0_idx = + wide_bw_chansw_ie->new_center_freq_seg0, +- .center_freq_seg1_idx = +- wide_bw_chansw_ie->new_center_freq_seg1, ++ .center_freq_seg1_idx = new_seg1, + /* .basic_mcs_set doesn't matter */ + }; +- struct ieee80211_ht_operation ht_oper = {}; ++ struct ieee80211_ht_operation ht_oper = { ++ .operation_mode = ++ cpu_to_le16(new_seg1 << ++ IEEE80211_HT_OP_MODE_CCFS2_SHIFT), ++ }; + + /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT, + * to the previously parsed chandef +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 5fc6c028f89c0..b7a71578bd986 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -29,6 +29,7 @@ + #include + #include + ++#include "auth_gss_internal.h" + #include "../netns.h" + + #include +@@ -125,35 +126,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, size_t len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static inline const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return 
ERR_PTR(-EFAULT); +- dest->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(dest->data == NULL)) +- return ERR_PTR(-ENOMEM); +- dest->len = len; +- return q; +-} +- + static struct gss_cl_ctx * + gss_cred_get_ctx(struct rpc_cred *cred) + { +diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h +new file mode 100644 +index 0000000000000..f6d9631bd9d00 +--- /dev/null ++++ b/net/sunrpc/auth_gss/auth_gss_internal.h +@@ -0,0 +1,45 @@ ++// SPDX-License-Identifier: BSD-3-Clause ++/* ++ * linux/net/sunrpc/auth_gss/auth_gss_internal.h ++ * ++ * Internal definitions for RPCSEC_GSS client authentication ++ * ++ * Copyright (c) 2000 The Regents of the University of Michigan. ++ * All rights reserved. ++ * ++ */ ++#include ++#include ++#include ++ ++static inline const void * ++simple_get_bytes(const void *p, const void *end, void *res, size_t len) ++{ ++ const void *q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ memcpy(res, p, len); ++ return q; ++} ++ ++static inline const void * ++simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) ++{ ++ const void *q; ++ unsigned int len; ++ ++ p = simple_get_bytes(p, end, &len, sizeof(len)); ++ if (IS_ERR(p)) ++ return p; ++ q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ if (len) { ++ dest->data = kmemdup(p, len, GFP_NOFS); ++ if (unlikely(dest->data == NULL)) ++ return ERR_PTR(-ENOMEM); ++ } else ++ dest->data = NULL; ++ dest->len = len; ++ return q; ++} +diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c +index 6e5d6d2402158..b552dd4f32f80 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_mech.c ++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c +@@ -21,6 +21,8 @@ + #include + #include + ++#include "auth_gss_internal.h" ++ + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + # define RPCDBG_FACILITY RPCDBG_AUTH + #endif +@@ -164,35 +166,6 @@ get_gss_krb5_enctype(int etype) + return NULL; + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, int len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- res->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(res->data == NULL)) +- return ERR_PTR(-ENOMEM); +- res->len = len; +- return q; +-} +- + static inline const void * + get_key(const void *p, const void *end, + struct krb5_ctx *ctx, struct crypto_sync_skcipher **res) +diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c +index 71562154c0b1e..217e8ce9a4ba4 100644 +--- a/sound/soc/codecs/ak4458.c ++++ b/sound/soc/codecs/ak4458.c +@@ -523,18 +523,10 @@ static struct snd_soc_dai_driver ak4497_dai = { + .ops = &ak4458_dai_ops, + }; + +-static void ak4458_power_off(struct ak4458_priv *ak4458) ++static void ak4458_reset(struct ak4458_priv *ak4458, bool active) + { + if (ak4458->reset_gpiod) { +- gpiod_set_value_cansleep(ak4458->reset_gpiod, 0); +- usleep_range(1000, 2000); +- } +-} +- +-static void ak4458_power_on(struct ak4458_priv *ak4458) +-{ +- if (ak4458->reset_gpiod) { +- 
gpiod_set_value_cansleep(ak4458->reset_gpiod, 1); ++ gpiod_set_value_cansleep(ak4458->reset_gpiod, active); + usleep_range(1000, 2000); + } + } +@@ -548,7 +540,7 @@ static int ak4458_init(struct snd_soc_component *component) + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); + +- ak4458_power_on(ak4458); ++ ak4458_reset(ak4458, false); + + ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1, + 0x80, 0x80); /* ACKS bit = 1; 10000000 */ +@@ -571,7 +563,7 @@ static void ak4458_remove(struct snd_soc_component *component) + { + struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component); + +- ak4458_power_off(ak4458); ++ ak4458_reset(ak4458, true); + } + + #ifdef CONFIG_PM +@@ -581,7 +573,7 @@ static int __maybe_unused ak4458_runtime_suspend(struct device *dev) + + regcache_cache_only(ak4458->regmap, true); + +- ak4458_power_off(ak4458); ++ ak4458_reset(ak4458, true); + + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 0); +@@ -596,8 +588,8 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev) + if (ak4458->mute_gpiod) + gpiod_set_value_cansleep(ak4458->mute_gpiod, 1); + +- ak4458_power_off(ak4458); +- ak4458_power_on(ak4458); ++ ak4458_reset(ak4458, true); ++ ak4458_reset(ak4458, false); + + regcache_cache_only(ak4458->regmap, false); + regcache_mark_dirty(ak4458->regmap); +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c +index 2cb719893324a..1940b17f27efa 100644 +--- a/sound/soc/intel/skylake/skl-topology.c ++++ b/sound/soc/intel/skylake/skl-topology.c +@@ -3632,7 +3632,7 @@ static void skl_tplg_complete(struct snd_soc_component *component) + sprintf(chan_text, "c%d", mach->mach_params.dmic_num); + + for (i = 0; i < se->items; i++) { +- struct snd_ctl_elem_value val; ++ struct snd_ctl_elem_value val = {}; + + if (strstr(texts[i], chan_text)) { + val.value.enumerated.item[0] = i;
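
---

The squashfs hardening repeated in both stable patch files above replaces a single sanity check (table[0] had to lie below the table start) with a much stronger invariant on the on-disk index tables: the table must exactly fill the space up to the next table, its entries must be strictly increasing, and neighbouring entries, as well as the last entry versus the table start, may differ by at most one metadata block. The following is a minimal userspace sketch of that invariant, not kernel code; valid_index_table and METADATA_SIZE are illustrative names:

#include <stdint.h>
#include <stdio.h>

#define METADATA_SIZE 8192	/* stand-in for SQUASHFS_METADATA_SIZE */

/*
 * Returns 1 if an index table obeys the invariant the patches enforce:
 * entries strictly increase, consecutive entries are at most one
 * metadata block apart, and the last entry lies less than one metadata
 * block before the start of the table itself.
 */
static int valid_index_table(const uint64_t *table, int indexes,
			     uint64_t table_start)
{
	uint64_t start, end;
	int n;

	for (n = 0; n < indexes - 1; n++) {
		start = table[n];
		end = table[n + 1];
		if (start >= end || end - start > METADATA_SIZE)
			return 0;
	}

	start = table[indexes - 1];
	if (start >= table_start || table_start - start > METADATA_SIZE)
		return 0;

	return 1;
}

int main(void)
{
	uint64_t good[] = { 0, 8000, 16000 };
	uint64_t bad[] = { 0, 50000, 16000 };	/* oversized gap, then decreasing */

	printf("good: %d\n", valid_index_table(good, 3, 20000));	/* 1 */
	printf("bad:  %d\n", valid_index_table(bad, 3, 20000));	/* 0 */
	return 0;
}

A crafted image whose index entries decrease, or jump by more than a metadata block, previously passed the old single comparison and could send later metadata reads out of bounds; with a walk of this shape it is rejected with EINVAL up front.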
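
Both trees likewise carry the verifier fix for the runtime guards inserted around BPF_DIV and BPF_MOD by register. The removed mask_and_div/mask_and_mod patchlets began with BPF_MOV32_REG(src, src), which truncates the upper 32 bits of the source register as a visible side effect; the chk_and_div/chk_and_mod replacements only compare (BPF_JMP32 for 32-bit opcodes) and leave the register untouched. The semantics being enforced are unchanged. A hedged userspace model of those semantics, with illustrative function names that are not kernel API:

#include <stdint.h>
#include <stdio.h>

/* BPF runtime rule the patchlet implements: Rx / 0 -> 0 */
static uint64_t bpf_div64(uint64_t dst, uint64_t src)
{
	return src ? dst / src : 0;
}

/* BPF runtime rule: Rx % 0 -> Rx (destination left unchanged) */
static uint64_t bpf_mod64(uint64_t dst, uint64_t src)
{
	return src ? dst % src : dst;
}

/*
 * The 32-bit variant tests only the low 32 bits of the source, which is
 * what the BPF_JMP32 comparison in chk_and_div/chk_and_mod does; unlike
 * the old BPF_MOV32_REG prologue, nothing writes back to the source
 * register, so its upper half survives the operation.
 */
static uint32_t bpf_div32(uint32_t dst, uint32_t src)
{
	return src ? dst / src : 0;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)bpf_div64(10, 0));	/* 0 */
	printf("%llu\n", (unsigned long long)bpf_mod64(10, 0));	/* 10 */
	printf("%u\n", bpf_div32(10, 3));			/* 3 */
	return 0;
}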
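
patch-5.4.97-98.patch also changes kprobe_on_func_entry() from bool to a tri-state int so callers can separate "not a function entry" from "symbol not resolvable yet": 0 means the address is a function entry, -EINVAL means it is not (a kretprobe must be rejected), and -ENOENT means the lookup failed, as with a module that is not loaded yet, in which case trace_kprobe_create() defers the decision to register_kretprobe(). A stubbed sketch of that caller-side logic; the lookup stub and classify() are hypothetical, not kernel API:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stub lookup: pretend only "known_func" exists and offset 0 is its entry. */
static int kprobe_on_func_entry_stub(const char *sym, unsigned long offset)
{
	if (strcmp(sym, "known_func") != 0)
		return -ENOENT;		/* lookup failed; symbol may appear later */
	return offset == 0 ? 0 : -EINVAL;
}

/* Mirrors the decision trace_kprobe_create() now makes for retprobes. */
static const char *classify(const char *sym, unsigned long offset, int is_return)
{
	int ret = kprobe_on_func_entry_stub(sym, offset);

	if (ret == 0)
		return "function entry: set FENTRY flag";
	if (ret == -EINVAL && is_return)
		return "reject now: retprobe not at function entry";
	return "defer: recheck at register_kretprobe() time";	/* -ENOENT */
}

int main(void)
{
	printf("%s\n", classify("known_func", 0, 1));
	printf("%s\n", classify("known_func", 8, 1));
	printf("%s\n", classify("not_loaded_yet", 0, 1));
	return 0;
}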
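
The drivers/regulator/core.c hunk closes a race in which two tasks resolve the same rdev->supply at once: the unlocked fast-path check is kept, but the result is re-checked under the regulator lock before set_supply() runs, and the losing task drops its reference. This is the classic double-checked locking shape. A simplified pthread sketch of the pattern; struct rdev here is a stand-in, not the kernel structure:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rdev {
	pthread_mutex_t lock;
	void *supply;			/* NULL until resolved */
};

static void *regulator_lookup(void)
{
	return malloc(1);		/* stand-in for the real, slow lookup */
}

static int resolve_supply(struct rdev *r)
{
	void *s;

	/* Fast path without locking contention. */
	if (r->supply)
		return 0;

	s = regulator_lookup();
	if (!s)
		return -1;

	pthread_mutex_lock(&r->lock);
	if (r->supply) {
		/* Supply just resolved by a concurrent task: drop ours. */
		pthread_mutex_unlock(&r->lock);
		free(s);
		return 0;
	}
	r->supply = s;
	pthread_mutex_unlock(&r->lock);
	return 0;
}

int main(void)
{
	struct rdev r = { PTHREAD_MUTEX_INITIALIZER, NULL };

	resolve_supply(&r);
	resolve_supply(&r);		/* second call takes the fast path */
	printf("resolved: %p\n", r.supply);
	free(r.supply);
	return 0;
}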
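
Finally, the block/blk-cgroup.c hunk stops blkcg_destroy_blkgs() from spinning with cpu_relax() while holding blkcg->lock: whenever the queue lock is contended or a reschedule is due, it now drops the outer lock, calls cond_resched() and retries, so destroying a pathological number of blkgs can no longer trip the softlockup detector. The same drop-the-outer-lock-and-yield shape in a userspace sketch, with simplified names and data structures:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define N 5

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* blkcg->lock analog */
static pthread_mutex_t inner[N];				/* per-queue lock analogs */
static int remaining = N;

static void destroy_all(void)
{
	pthread_mutex_lock(&outer);
	while (remaining > 0) {
		pthread_mutex_t *q = &inner[remaining - 1];

		if (pthread_mutex_trylock(q) != 0) {
			/*
			 * Contended: drop the outer lock and yield instead
			 * of spinning with it held, then retry the item.
			 */
			pthread_mutex_unlock(&outer);
			sched_yield();		/* cond_resched() analog */
			pthread_mutex_lock(&outer);
			continue;
		}
		remaining--;			/* blkg_destroy() analog */
		pthread_mutex_unlock(q);
	}
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	int i;

	for (i = 0; i < N; i++)
		pthread_mutex_init(&inner[i], NULL);
	destroy_all();
	printf("remaining = %d\n", remaining);
	return 0;
}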