diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index dbfbbe9ab28b12..961684e9466e6f 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -233,9 +233,9 @@ usrjquota=<file> Appoint specified file and type during mount, so that quota
grpjquota=<file> information can be properly updated during recovery flow,
prjjquota=<file> <quota file>: must be in root directory;
jqfmt=<quota type> <quota type>: [vfsold,vfsv0,vfsv1].
-offusrjquota Turn off user journalled quota.
-offgrpjquota Turn off group journalled quota.
-offprjjquota Turn off project journalled quota.
+usrjquota= Turn off user journalled quota.
+grpjquota= Turn off group journalled quota.
+prjjquota= Turn off project journalled quota.
quota Enable plain user disk quota accounting.
noquota Disable all plain disk quota option.
alloc_mode=%s Adjust block allocation policy, which supports "reuse"
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 3e38f69567939e..b463949736c566 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -1489,9 +1489,6 @@ operations:

do: &module-eeprom-get-op
request:
- attributes:
- - header
- reply:
attributes:
- header
- offset
@@ -1499,6 +1496,9 @@ operations:
- page
- bank
- i2c-address
+ reply:
+ attributes:
+ - header
- data
dump: *module-eeprom-get-op
-
diff --git a/Makefile b/Makefile
index 116eb523392a23..685a65992449d1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 101
+SUBLEVEL = 102
EXTRAVERSION =
NAME = Pinguïn Aangedreven

diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
index 33d5f27285a476..9ece280d163afd 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-kontron-bl-common.dtsi
@@ -169,7 +169,6 @@ &uart2 {
pinctrl-0 = <&pinctrl_uart2>;
linux,rs485-enabled-at-boot-time;
rs485-rx-during-tx;
- rs485-rts-active-low;
uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi b/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
index d1095b700c5654..d96148abf5bb6a 100644
--- a/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
+++ b/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
@@ -612,7 +612,7 @@ usbmisc1: usb@400b4800 {

ftm: ftm@400b8000 {
compatible = "fsl,ftm-timer";
- reg = <0x400b8000 0x1000 0x400b9000 0x1000>;
+ reg = <0x400b8000 0x1000>, <0x400b9000 0x1000>;
interrupts = <44 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ftm-evt", "ftm-src",
"ftm-evt-counter-en", "ftm-src-counter-en";
diff --git a/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts b/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts
index 16b567e3cb4722..b4fdcf9c02b500 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/ti/omap/am335x-boneblack.dts
@@ -35,7 +35,7 @@ &gpio0 {
"P9_18 [spi0_d1]",
"P9_17 [spi0_cs0]",
"[mmc0_cd]",
- "P8_42A [ecappwm0]",
+ "P9_42A [ecappwm0]",
"P8_35 [lcd d12]",
"P8_33 [lcd d13]",
"P8_31 [lcd d14]",
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 0ca94b90bc4ec5..ba98daeb119cd9 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -245,7 +245,7 @@ static int ctr_encrypt(struct skcipher_request *req)
while (walk.nbytes > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- int bytes = walk.nbytes;
+ unsigned int bytes = walk.nbytes;

if (unlikely(bytes < AES_BLOCK_SIZE))
src = dst = memcpy(buf + sizeof(buf) - bytes,
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
index 8ab0e45f2ad31c..f93c2b604c57fb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
@@ -284,6 +284,8 @@ &usdhc3 {
pinctrl-0 = <&pinctrl_usdhc3>;
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ assigned-clocks = <&clk IMX8MM_CLK_USDHC3>;
+ assigned-clock-rates = <400000000>;
bus-width = <8>;
non-removable;
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
index 1760062e6ffcfd..f5f87b38912345 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
@@ -295,6 +295,8 @@ &usdhc3 {
pinctrl-0 = <&pinctrl_usdhc3>;
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ assigned-clocks = <&clk IMX8MN_CLK_USDHC3>;
+ assigned-clock-rates = <400000000>;
bus-width = <8>;
non-removable;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
index 4c5be22b47feea..e01f6e8a17f2fc 100644
--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
@@ -862,6 +862,7 @@ blsp1_dma: dma-controller@7884000 {
clock-names = "bam_clk";
#dma-cells = <1>;
qcom,ee = <0>;
+ qcom,controlled-remotely;
};

blsp1_uart1: serial@78af000 {
@@ -982,6 +983,7 @@ blsp2_dma: dma-controller@7ac4000 {
clock-names = "bam_clk";
#dma-cells = <1>;
qcom,ee = <0>;
+ qcom,controlled-remotely;
};

blsp2_uart2: serial@7af0000 {
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 7758136d71d645..9dc00f759f19bb 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -3435,18 +3435,18 @@ spmi_bus: spmi@c440000 {
#interrupt-cells = <4>;
};

- sram@146aa000 {
+ sram@14680000 {
compatible = "qcom,sc7180-imem", "syscon", "simple-mfd";
- reg = <0 0x146aa000 0 0x2000>;
+ reg = <0 0x14680000 0 0x2e000>;

#address-cells = <1>;
#size-cells = <1>;

- ranges = <0 0 0x146aa000 0x2000>;
+ ranges = <0 0 0x14680000 0x2e000>;

- pil-reloc@94c {
+ pil-reloc@2a94c {
compatible = "qcom,pil-reloc-info";
- reg = <0x94c 0xc8>;
+ reg = <0x2a94c 0xc8>;
};
};

diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 4ea693a0758565..64ea9d73d970a9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -4989,18 +4989,18 @@ spmi_bus: spmi@c440000 {
#interrupt-cells = <4>;
};

- sram@146bf000 {
+ sram@14680000 {
compatible = "qcom,sdm845-imem", "syscon", "simple-mfd";
- reg = <0 0x146bf000 0 0x1000>;
+ reg = <0 0x14680000 0 0x40000>;

#address-cells = <1>;
#size-cells = <1>;

- ranges = <0 0 0x146bf000 0x1000>;
+ ranges = <0 0 0x14680000 0x40000>;

- pil-reloc@94c {
+ pil-reloc@3f94c {
compatible = "qcom,pil-reloc-info";
- reg = <0x94c 0xc8>;
+ reg = <0x3f94c 0xc8>;
};
};

diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 30638a6e8edcb3..d036f903864c26 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -10,7 +10,7 @@ config BOOTPARAM_STRING

config EARLY_PRINTK
bool "Early printk"
- depends on !(SUN3 || M68000 || COLDFIRE)
+ depends on MMU_MOTOROLA
help
Write kernel log output directly to a serial port.
Where implemented, output goes to the framebuffer as well.
diff --git a/arch/m68k/kernel/early_printk.c b/arch/m68k/kernel/early_printk.c
index f11ef9f1f56fcf..521cbb8a150c99 100644
--- a/arch/m68k/kernel/early_printk.c
+++ b/arch/m68k/kernel/early_printk.c
@@ -16,25 +16,10 @@
#include "../mvme147/mvme147.h"
#include "../mvme16x/mvme16x.h"

-asmlinkage void __init debug_cons_nputs(const char *s, unsigned n);
-
-static void __ref debug_cons_write(struct console *c,
- const char *s, unsigned n)
-{
-#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
- defined(CONFIG_COLDFIRE))
- if (MACH_IS_MVME147)
- mvme147_scc_write(c, s, n);
- else if (MACH_IS_MVME16x)
- mvme16x_cons_write(c, s, n);
- else
- debug_cons_nputs(s, n);
-#endif
-}
+asmlinkage void __init debug_cons_nputs(struct console *c, const char *s, unsigned int n);

static struct console early_console_instance = {
.name = "debug",
- .write = debug_cons_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
@@ -44,6 +29,12 @@ static int __init setup_early_printk(char *buf)
if (early_console || buf)
return 0;

+ if (MACH_IS_MVME147)
+ early_console_instance.write = mvme147_scc_write;
+ else if (MACH_IS_MVME16x)
+ early_console_instance.write = mvme16x_cons_write;
+ else
+ early_console_instance.write = debug_cons_nputs;
early_console = &early_console_instance;
register_console(early_console);

@@ -51,20 +42,15 @@ static int __init setup_early_printk(char *buf)
}
early_param("earlyprintk", setup_early_printk);

-/*
- * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be called
- * after init sections are discarded (for platforms that use it).
- */
-#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
- defined(CONFIG_COLDFIRE))
-
static int __init unregister_early_console(void)
{
- if (!early_console || MACH_IS_MVME16x)
- return 0;
+ /*
+ * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be
+ * called after init sections are discarded (for platforms that use it).
+ */
+ if (early_console && early_console->write == debug_cons_nputs)
+ return unregister_console(early_console);

- return unregister_console(early_console);
+ return 0;
}
late_initcall(unregister_early_console);
-
-#endif
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 9e812d8606be83..397114962a1427 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -3267,8 +3267,8 @@ func_return putn
* turns around and calls the internal routines. This routine
* is used by the boot console.
*
- * The calling parameters are:
- * void debug_cons_nputs(const char *str, unsigned length)
+ * The function signature is -
+ * void debug_cons_nputs(struct console *c, const char *s, unsigned int n)
*
* This routine does NOT understand variable arguments only
* simple strings!
@@ -3277,8 +3277,8 @@ ENTRY(debug_cons_nputs)
moveml %d0/%d1/%a0,%sp@-
movew %sr,%sp@-
ori #0x0700,%sr
- movel %sp@(18),%a0 /* fetch parameter */
- movel %sp@(22),%d1 /* fetch parameter */
+ movel %sp@(22),%a0 /* char *s */
+ movel %sp@(26),%d1 /* unsigned int n */
jra 2f
1:
#ifdef CONSOLE_DEBUG
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 93c2d695588a1c..ab8102f94e066f 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -506,6 +506,60 @@ static int __init set_ntlb(char *str)

__setup("ntlb=", set_ntlb);

+/* Initialise all TLB entries with unique values */
+static void r4k_tlb_uniquify(void)
+{
+	int entry = num_wired_entries();
+
+	htw_stop();
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+
+	while (entry < current_cpu_data.tlbsize) {
+		unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
+		unsigned long asid = 0;
+		int idx;
+
+		/* Skip wired MMID to make ginvt_mmid work */
+		if (cpu_has_mmid)
+			asid = MMID_KERNEL_WIRED + 1;
+
+		/* Check for match before using UNIQUE_ENTRYHI */
+		do {
+			if (cpu_has_mmid) {
+				write_c0_memorymapid(asid);
+				write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+			} else {
+				write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
+			}
+			mtc0_tlbw_hazard();
+			tlb_probe();
+			tlb_probe_hazard();
+			idx = read_c0_index();
+			/* No match or match is on current entry */
+			if (idx < 0 || idx == entry)
+				break;
+			/*
+			 * If we hit a match, we need to try again with
+			 * a different ASID.
+			 */
+			asid++;
+		} while (asid < asid_mask);
+
+		if (idx >= 0 && idx != entry)
+			panic("Unable to uniquify TLB entry %d", idx);
+
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+		tlb_write_indexed();
+		entry++;
+	}
+
+	tlbw_use_hazard();
+	htw_start();
+	flush_micro_tlb();
+}
+
/*
* Configure TLB (for init or after a CPU has been powered off).
*/
@@ -545,7 +599,7 @@ static void r4k_tlb_configure(void)
temp_tlb_entry = current_cpu_data.tlbsize - 1;

/* From this point on the ARC firmware is dead. */
- local_flush_tlb_all();
+ r4k_tlb_uniquify();

/* Did I tell you that ARC SUCKS? */
}
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index eaf3273372a982..80989e3f6780fb 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -254,7 +254,6 @@ CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 2e286bba2f6456..82626363a3090c 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1130,6 +1130,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe)

return ret;
}
+EXPORT_SYMBOL_GPL(eeh_unfreeze_pe);


static struct pci_device_id eeh_reset_ids[] = {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 438568a472d03b..9ba4adc214af7c 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -257,13 +257,12 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
struct pci_driver *driver;
enum pci_ers_result new_result;

- pci_lock_rescan_remove();
pdev = edev->pdev;
if (pdev)
get_device(&pdev->dev);
- pci_unlock_rescan_remove();
if (!pdev) {
eeh_edev_info(edev, "no device");
+ *result = PCI_ERS_RESULT_DISCONNECT;
return;
}
device_lock(&pdev->dev);
@@ -304,8 +303,9 @@ static void eeh_pe_report(const char *name, struct eeh_pe *root,
struct eeh_dev *edev, *tmp;

pr_info("EEH: Beginning: '%s'\n", name);
- eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
- eeh_pe_report_edev(edev, fn, result);
+ eeh_for_each_pe(root, pe)
+ eeh_pe_for_each_dev(pe, edev, tmp)
+ eeh_pe_report_edev(edev, fn, result);
if (result)
pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
name, pci_ers_result_name(*result));
@@ -383,6 +383,8 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
if (!edev)
return;

+ pci_lock_rescan_remove();
+
/*
* The content in the config space isn't saved because
* the blocked config space on some adapters. We have
@@ -393,14 +395,19 @@ static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
if (list_is_last(&edev->entry, &edev->pe->edevs))
eeh_pe_restore_bars(edev->pe);

+ pci_unlock_rescan_remove();
return;
}

pdev = eeh_dev_to_pci_dev(edev);
- if (!pdev)
+ if (!pdev) {
+ pci_unlock_rescan_remove();
return;
+ }

pci_restore_state(pdev);
+
+ pci_unlock_rescan_remove();
}

/**
@@ -647,9 +654,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
} else {
- pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
}

/*
@@ -665,8 +670,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
if (rc)
return rc;

- pci_lock_rescan_remove();
-
/* Restore PE */
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
@@ -674,7 +677,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc) {
- pci_unlock_rescan_remove();
return rc;
}

@@ -709,7 +711,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
pe->tstamp = tstamp;
pe->freeze_count = cnt;

- pci_unlock_rescan_remove();
return 0;
}

@@ -843,10 +844,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
int devices = 0;

+ pci_lock_rescan_remove();
+
bus = eeh_pe_bus_get(pe);
if (!bus) {
pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
+ pci_unlock_rescan_remove();
return;
}

@@ -1085,10 +1089,15 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

- pci_lock_rescan_remove();
- pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
+ bus = eeh_pe_bus_get(pe);
+ if (bus)
+ pci_hp_remove_devices(bus);
+ else
+ pr_err("%s: PCI bus for PHB#%x-PE#%x disappeared\n",
+ __func__, pe->phb->global_number, pe->addr);
+
/* The passed PE should no longer be used */
+ pci_unlock_rescan_remove();
return;
}

@@ -1105,6 +1114,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
eeh_clear_slot_attention(edev->pdev);

eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
+
+ pci_unlock_rescan_remove();
}

/**
@@ -1123,6 +1134,7 @@ void eeh_handle_special_event(void)
unsigned long flags;
int rc;

+ pci_lock_rescan_remove();

do {
rc = eeh_ops->next_error(&pe);
@@ -1162,10 +1174,12 @@ void eeh_handle_special_event(void)

break;
case EEH_NEXT_ERR_NONE:
+ pci_unlock_rescan_remove();
return;
default:
pr_warn("%s: Invalid value %d from next_error()\n",
__func__, rc);
+ pci_unlock_rescan_remove();
return;
}

@@ -1177,7 +1191,9 @@ void eeh_handle_special_event(void)
if (rc == EEH_NEXT_ERR_FROZEN_PE ||
rc == EEH_NEXT_ERR_FENCED_PHB) {
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ pci_unlock_rescan_remove();
eeh_handle_normal_event(pe);
+ pci_lock_rescan_remove();
} else {
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
@@ -1190,7 +1206,6 @@ void eeh_handle_special_event(void)
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);

- pci_lock_rescan_remove();
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose);
if (!phb_pe ||
@@ -1209,7 +1224,6 @@ void eeh_handle_special_event(void)
}
pci_hp_remove_devices(bus);
}
- pci_unlock_rescan_remove();
}

/*
@@ -1219,4 +1233,6 @@ void eeh_handle_special_event(void)
if (rc == EEH_NEXT_ERR_DEAD_IOC)
break;
} while (rc != EEH_NEXT_ERR_NONE);
+
+ pci_unlock_rescan_remove();
}
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 7d1b50599dd6c2..08095aeba5c983 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -671,10 +671,12 @@ static void eeh_bridge_check_link(struct eeh_dev *edev)
eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);

/* Check link */
- if (!edev->pdev->link_active_reporting) {
- eeh_edev_dbg(edev, "No link reporting capability\n");
- msleep(1000);
- return;
+ if (edev->pdev) {
+ if (!edev->pdev->link_active_reporting) {
+ eeh_edev_dbg(edev, "No link reporting capability\n");
+ msleep(1000);
+ return;
+ }
}

/* Wait the link is up until timeout (5s) */
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 0fe251c6ac2ce7..ac70e85b0df85d 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -111,6 +111,9 @@ void pci_hp_add_devices(struct pci_bus *bus)
struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);

+ if (!dn)
+ return;
+
phb = pci_bus_to_host(bus);

mode = PCI_PROBE_NORMAL;
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index cab2f9c011a8db..7b420424b6d7c4 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -103,16 +103,16 @@ UTS_MACHINE := sh
LDFLAGS_vmlinux += -e _stext

ifdef CONFIG_CPU_LITTLE_ENDIAN
-ld-bfd := elf32-sh-linux
-LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
+ld_bfd := elf32-sh-linux
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld_bfd)
KBUILD_LDFLAGS += -EL
else
-ld-bfd := elf32-shbig-linux
-LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
+ld_bfd := elf32-shbig-linux
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld_bfd)
KBUILD_LDFLAGS += -EB
endif

-export ld-bfd
+export ld_bfd

# Mach groups
machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index b5e29f99c02c84..5d5b2da2a3f1f9 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -29,7 +29,7 @@ endif

ccflags-remove-$(CONFIG_MCOUNT) += -pg

-LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
+LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(IMAGE_OFFSET) -e startup \
-T $(obj)/../../kernel/vmlinux.lds

KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -53,7 +53,7 @@ $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE

OBJCOPYFLAGS += -R .empty_zero_page

-LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
+LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T

$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
$(call if_changed,ld)
diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile
index c7c8be58400cd9..17b03df0a8de4d 100644
--- a/arch/sh/boot/romimage/Makefile
+++ b/arch/sh/boot/romimage/Makefile
@@ -13,7 +13,7 @@ mmcif-obj-$(CONFIG_CPU_SUBTYPE_SH7724) := $(obj)/mmcif-sh7724.o
load-$(CONFIG_ROMIMAGE_MMCIF) := $(mmcif-load-y)
obj-$(CONFIG_ROMIMAGE_MMCIF) := $(mmcif-obj-y)

-LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(load-y) -e romstart \
+LDFLAGS_vmlinux := --oformat $(ld_bfd) -Ttext $(load-y) -e romstart \
-T $(obj)/../../kernel/vmlinux.lds

$(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE
@@ -24,7 +24,7 @@ OBJCOPYFLAGS += -j .empty_zero_page
$(obj)/zeropage.bin: vmlinux FORCE
$(call if_changed,objcopy)

-LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
+LDFLAGS_piggy.o := -r --format binary --oformat $(ld_bfd) -T

$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE
$(call if_changed,ld)
diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
index 7c3cec4c68cffe..006a5a164ea91d 100644
--- a/arch/um/drivers/rtc_user.c
+++ b/arch/um/drivers/rtc_user.c
@@ -28,7 +28,7 @@ int uml_rtc_start(bool timetravel)
int err;

if (timetravel) {
- int err = os_pipe(uml_rtc_irq_fds, 1, 1);
+ err = os_pipe(uml_rtc_irq_fds, 1, 1);
if (err)
goto fail;
} else {
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 5616c3b258060e..92c9f8b79f0dc4 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -165,6 +165,13 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
*/
if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+
+ /*
+ * If validating memory (making it private) and affected by the
+ * cache-coherency vulnerability, perform the cache eviction mitigation.
+ */
+ if (op == SNP_PAGE_STATE_PRIVATE && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+ sev_evict_cache((void *)paddr, 1);
}

void snp_set_page_private(unsigned long paddr)
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
index d75237ba7ce944..5660d3229d29c2 100644
--- a/arch/x86/boot/cpuflags.c
+++ b/arch/x86/boot/cpuflags.c
@@ -115,5 +115,18 @@ void get_cpuflags(void)
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
&cpu.flags[1]);
}
+
+ if (max_amd_level >= 0x8000001f) {
+ u32 ebx;
+
+ /*
+ * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+ * the virtualization flags entry (word 8) and set by
+ * scattered.c, so the bit needs to be explicitly set.
+ */
+ cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+ if (ebx & BIT(31))
+ set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+ }
}
}
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 311cc58f29581d..199441d11fbbab 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -228,6 +228,7 @@
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* "" SNP cache coherency software work around not needed */

#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 7a42e699f6e39a..8fad19ec855514 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -46,6 +46,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index acbec4de3ec31a..b90dfa46ec5b57 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1068,6 +1068,24 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
}
}

+static inline void sev_evict_cache(void *va, int npages)
+{
+	volatile u8 val __always_unused;
+	u8 *bytes = va;
+	int page_idx;
+
+	/*
+	 * For SEV guests, a read from the first/last cache-lines of a 4K page
+	 * using the guest key is sufficient to cause a flush of all cache-lines
+	 * associated with that 4K page without incurring all the overhead of a
+	 * full CLFLUSH sequence.
+	 */
+	for (page_idx = 0; page_idx < npages; page_idx++) {
+		val = bytes[page_idx * PAGE_SIZE];
+		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+	}
+}
+
static void pvalidate_pages(struct snp_psc_desc *desc)
{
struct psc_entry *e;
@@ -1100,6 +1118,24 @@ static void pvalidate_pages(struct snp_psc_desc *desc)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}
}
+
+	/*
+	 * If not affected by the cache-coherency vulnerability there is no need
+	 * to perform the cache eviction mitigation.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+		return;
+
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		/*
+		 * If validating memory (making it private) perform the cache
+		 * eviction mitigation.
+		 */
+		if (e->operation == SNP_PAGE_STATE_PRIVATE)
+			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+	}
}

static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 9905dc0e0b0960..c6a9a9d3ff2f37 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -688,12 +688,14 @@ static void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
unsigned long npages, enum psc_op op)
{
- unsigned long paddr_end;
+ unsigned long vaddr_begin, paddr_end;
u64 val;
int ret;

vaddr = vaddr & PAGE_MASK;

+ vaddr_begin = vaddr;
+
paddr = paddr & PAGE_MASK;
paddr_end = paddr + (npages << PAGE_SHIFT);

@@ -736,6 +738,13 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
paddr += PAGE_SIZE;
}

+ /*
+ * If validating memory (making it private) and affected by the
+ * cache-coherency vulnerability, perform the cache eviction mitigation.
+ */
+ if (op == SNP_PAGE_STATE_PRIVATE && !cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+ sev_evict_cache((void *)vaddr_begin, npages);
+
return;

e_term:
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 2354c0156e51c9..94195d750e65c0 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -121,13 +121,12 @@ static bool ex_handler_sgx(const struct exception_table_entry *fixup,
static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
struct pt_regs *regs)
{
- regs->ip = ex_fixup_addr(fixup);
-
WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
(void *)instruction_pointer(regs));

fpu_reset_from_exception_fixup();
- return true;
+
+ return ex_handler_default(fixup, regs);
}

/*
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 8c873a8e39cd93..ec6d7a08104d91 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2030,7 +2030,7 @@ static void ublk_deinit_queues(struct ublk_device *ub)

for (i = 0; i < nr_queues; i++)
ublk_deinit_queue(ub, i);
- kfree(ub->__queues);
+ kvfree(ub->__queues);
}

static int ublk_init_queues(struct ublk_device *ub)
@@ -2041,7 +2041,7 @@ static int ublk_init_queues(struct ublk_device *ub)
int i, ret = -ENOMEM;

ub->queue_size = ubq_size;
- ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
+ ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
if (!ub->__queues)
return ret;

diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index db507a66fa8acd..1a2d227b7b7b96 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -508,6 +508,10 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },

+ /* Realtek 8851BU Bluetooth devices */
+ { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 1e3048f2bb38f0..6c4e40d0365f00 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -142,7 +142,9 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;

dev_info(&pdev->dev, "registered RNG driver\n");

diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 934e53a96dddac..00bf799964c61a 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -118,7 +118,7 @@ static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {

static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
- .fpfd_max = 300000,
+ .fpfd_max = 450000,
.fvco_min = 600000,
.fvco_max = 1200000,
};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index cd85d9f158b0c2..6f7b8f082ad307 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -278,6 +278,11 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,

lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
best_dev_name(dev), name);
+ if (!lpsc->pm_domain.name) {
+ clk_hw_unregister(&lpsc->hw);
+ kfree(lpsc);
+ return ERR_PTR(-ENOMEM);
+ }
lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index f3ce8664b2883b..b05553faed6d14 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -347,8 +347,7 @@ static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram",

static const char * const de_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 2, BIT(31),
- CLK_SET_RATE_PARENT);
+ 0x104, 0, 4, 24, 3, BIT(31), 0);

static const char * const tcon_parents[] = { "pll-video" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index 60a3ed7c7263bf..299332818ba3e2 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -587,8 +587,8 @@ static void xvcu_unregister_clock_provider(struct xvcu_device *xvcu)
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_MCU]);
if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_CORE]))
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_CORE]);
-
- clk_hw_unregister_fixed_factor(xvcu->pll_post);
+ if (!IS_ERR_OR_NULL(xvcu->pll_post))
+ clk_hw_unregister_fixed_factor(xvcu->pll_post);
}

/**
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6682f422cadd90..cc98d8cf543303 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1287,6 +1287,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
goto err_free_real_cpus;
}

+ init_rwsem(&policy->rwsem);
+
freq_constraints_init(&policy->constraints);

policy->nb_min.notifier_call = cpufreq_notifier_min;
@@ -1309,7 +1311,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
}

INIT_LIST_HEAD(&policy->policy_list);
- init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
@@ -2938,15 +2939,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);

- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;

@@ -2977,6 +2969,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;

+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("supports frequency invariance");
+ }
+
pr_debug("driver %s up and running\n", driver_data->name);
goto out;

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8a4fdf212ce0de..4f1206ff0a10e9 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2902,8 +2902,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;

- intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
- fast_switch);
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
+ target_pstate, fast_switch);
} else if (target_pstate != old_pstate) {
intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 70434601f99bed..9e093d44a06629 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -265,8 +265,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
}

chan->timeout = areq->cryptlen;
- rctx->nr_sgs = nr_sgs;
- rctx->nr_sgd = nr_sgd;
+ rctx->nr_sgs = ns;
+ rctx->nr_sgd = nd;
return 0;

theend_sgs:
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a24..dc26bc22c91d1d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)

void ccp5_debugfs_destroy(void)
{
+ mutex_lock(&ccp_debugfs_lock);
debugfs_remove_recursive(ccp_debugfs_dir);
+ ccp_debugfs_dir = NULL;
+ mutex_unlock(&ccp_debugfs_lock);
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 45063693859c01..de80c95309e627 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -436,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

if (ctx->flags & DRIVER_FLAGS_SG)
- dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+ dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);

return 0;
}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index f44c08f5f5ec4a..af4b978189e519 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -249,7 +249,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
safexcel_complete(priv, ring);

if (sreq->nents) {
- dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
sreq->nents = 0;
}

@@ -497,7 +499,9 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
DMA_FROM_DEVICE);
unmap_sg:
if (req->nents) {
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
req->nents = 0;
}
cdesc_rollback:
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index daba8ca05dbe42..b7322230353998 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -68,6 +68,7 @@ struct ocs_hcu_ctx {
* @sg_data_total: Total data in the SG list at any time.
* @sg_data_offset: Offset into the data of the current individual SG node.
* @sg_dma_nents: Number of sg entries mapped in dma_list.
+ * @nents: Number of entries in the scatterlist.
*/
struct ocs_hcu_rctx {
struct ocs_hcu_dev *hcu_dev;
@@ -91,6 +92,7 @@ struct ocs_hcu_rctx {
unsigned int sg_data_total;
unsigned int sg_data_offset;
unsigned int sg_dma_nents;
+ unsigned int nents;
};

/**
@@ -199,7 +201,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,

/* Unmap req->src (if mapped). */
if (rctx->sg_dma_nents) {
- dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
rctx->sg_dma_nents = 0;
}

@@ -260,6 +262,10 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
rc = -ENOMEM;
goto cleanup;
}
+
+ /* Save the value of nents to pass to dma_unmap_sg. */
+ rctx->nents = nents;
+
/*
* The value returned by dma_map_sg() can be < nents; so update
* nents accordingly.
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index e2dd568b87b519..621b5d3dfcef91 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
@@ -31,8 +31,10 @@ static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
struct adf_etr_ring_data *ring = sfile->private;

if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) {
+ (*pos)++;
return NULL;
+ }

return ring->base_addr +
(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 76baed0a76c0ee..0d2ce20db6d86d 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -38,7 +38,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
for (i = 0; i < blout->num_mapped_bufs; i++) {
dma_unmap_single(dev, blout->buffers[i].addr,
blout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

@@ -160,7 +160,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
}
buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
buffers[y].len = sg->length;
@@ -202,7 +202,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (!dma_mapping_error(dev, buflout->buffers[i].addr))
dma_unmap_single(dev, buflout->buffers[i].addr,
buflout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}

if (!buf->sgl_dst_valid)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 7842a9f22178c2..cf94ba3011d51b 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -197,7 +197,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
struct adf_dc_data *dc_data = NULL;
u8 *obuff = NULL;

- dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+ dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
if (!dc_data)
goto err;

@@ -205,7 +205,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
if (!obuff)
goto err;

- obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+ obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, obuff_p)))
goto err;

@@ -233,9 +233,9 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
return;

dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
kfree_sensitive(dc_data->ovf_buff);
- devm_kfree(dev, dc_data);
+ kfree(dc_data);
accel_dev->dc_data = NULL;
}

diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 3876e3ce822f44..eabed9d977df6c 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;

if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_skcipher_dma_cleanup(req);
+
+ atomic_sub(req->cryptlen, &engine->load);
}

static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;

- atomic_sub(skreq->cryptlen, &engine->load);
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 6815eddc906812..e339ce7ad53310 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;

if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
+
+ atomic_sub(req->nbytes, &engine->load);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
@@ -395,8 +398,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
}
}
}
-
- atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 49c542ecccde3b..7b991bbef4895a 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1382,15 +1382,11 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
int ret;
struct device *dev = devfreq->dev.parent;

+ if (!devfreq->governor)
+ continue;
+
if (!strncmp(devfreq->governor->name, governor->name,
DEVFREQ_NAME_LEN)) {
- /* we should have a devfreq governor! */
- if (!devfreq->governor) {
- dev_warn(dev, "%s: Governor %s NOT present\n",
- __func__, governor->name);
- continue;
- /* Fall through */
- }
ret = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (ret) {
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index ea48661e87ea70..ca0ba1d462832d 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
+ return ERR_PTR(-ENOMEM);
+
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
+ ret = -ENOMEM;
+ goto err_unmap_src;
+ }
+

/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
@@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
- if (!mv_chan->dma_desc_pool_virt)
- return ERR_PTR(-ENOMEM);
+ if (!mv_chan->dma_desc_pool_virt) {
+ ret = -ENOMEM;
+ goto err_unmap_dst;
+ }

/* discover transaction capabilites from the platform data */
dma_dev->cap_mask = cap_mask;
@@ -1155,6 +1165,13 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+err_unmap_dst:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+err_unmap_src:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+
return ERR_PTR(ret);
}

diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index a361f8c29cd38d..69c80d9339033f 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
list_add_tail(&ldesc->node, &lhead);
ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dchan->device->dev,
+ ldesc->hwdesc_dma_addr))
+ goto unmap_error;

dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
hwdesc, &ldesc->hwdesc_dma_addr);
@@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
spin_unlock_irq(&chan->lock);

return ARRAY_SIZE(dpage->desc);
+
+unmap_error:
+ while (i--) {
+ ldesc--; hwdesc--;
+
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+ sizeof(hwdesc), DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}

static void nbpf_desc_put(struct nbpf_desc *desc)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 79a566f3564a57..c305ea4ec17d21 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
}

cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}

int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index 427dec0cd1d36d..b77ecec50733bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -378,6 +378,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
+ .min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index cfe8b793d34467..69ab8d4f289cd8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -52,16 +52,9 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
}

if (drm_is_afbc(mode_cmd->modifier[0])) {
- int ret, i;
-
ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
if (ret) {
- struct drm_gem_object **obj = afbc_fb->base.obj;
-
- for (i = 0; i < info->num_planes; ++i)
- drm_gem_object_put(obj[i]);
-
- kfree(afbc_fb);
+ drm_framebuffer_put(&afbc_fb->base);
return ERR_PTR(ret);
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 7fb1c88bcc475f..69dfe69ce0f87d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true,
+ .pin = false,
.keep_resv = true,
};

diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
|
|
index 85f8fd6a21ec4d..956803ba6c1e1b 100644
|
|
--- a/drivers/i2c/busses/i2c-stm32f7.c
|
|
+++ b/drivers/i2c/busses/i2c-stm32f7.c
|
|
@@ -726,10 +726,11 @@ static void stm32f7_i2c_disable_dma_req(struct stm32f7_i2c_dev *i2c_dev)
|
|
|
|
static void stm32f7_i2c_dma_callback(void *arg)
|
|
{
|
|
- struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg;
|
|
+ struct stm32f7_i2c_dev *i2c_dev = arg;
|
|
struct stm32_i2c_dma *dma = i2c_dev->dma;
|
|
|
|
stm32f7_i2c_disable_dma_req(i2c_dev);
|
|
+ dmaengine_terminate_async(dma->chan_using);
|
|
dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len,
|
|
dma->dma_data_dir);
|
|
complete(&dma->dma_complete);
|
|
@@ -1496,17 +1497,11 @@ static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev)
|
|
static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
|
|
{
|
|
struct stm32f7_i2c_dev *i2c_dev = data;
|
|
- struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
|
|
- struct stm32_i2c_dma *dma = i2c_dev->dma;
|
|
- void __iomem *base = i2c_dev->base;
|
|
- u32 status, mask;
|
|
- int ret = IRQ_HANDLED;
|
|
+ u32 status;
|
|
|
|
- /* Check if the interrupt if for a slave device */
|
|
- if (!i2c_dev->master_mode) {
|
|
- ret = stm32f7_i2c_slave_isr_event(i2c_dev);
|
|
- return ret;
|
|
- }
|
|
+ /* Check if the interrupt is for a slave device */
|
|
+ if (!i2c_dev->master_mode)
|
|
+ return IRQ_WAKE_THREAD;
|
|
|
|
status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
|
|
|
|
@@ -1518,45 +1513,59 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
|
|
if (status & STM32F7_I2C_ISR_RXNE)
|
|
stm32f7_i2c_read_rx_data(i2c_dev);
|
|
|
|
+ /* Wake up the thread if other flags are raised */
|
|
+ if (status &
|
|
+ (STM32F7_I2C_ISR_NACKF | STM32F7_I2C_ISR_STOPF |
|
|
+ STM32F7_I2C_ISR_TC | STM32F7_I2C_ISR_TCR))
|
|
+ return IRQ_WAKE_THREAD;
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
|
|
+{
|
|
+ struct stm32f7_i2c_dev *i2c_dev = data;
|
|
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
|
|
+ void __iomem *base = i2c_dev->base;
|
|
+ u32 status, mask;
|
|
+ int ret;
|
|
+
|
|
+ if (!i2c_dev->master_mode)
|
|
+ return stm32f7_i2c_slave_isr_event(i2c_dev);
|
|
+
|
|
+ status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
|
|
+
|
|
/* NACK received */
|
|
if (status & STM32F7_I2C_ISR_NACKF) {
|
|
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
|
|
__func__, f7_msg->addr);
|
|
writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
- if (i2c_dev->use_dma) {
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
- }
+ if (i2c_dev->use_dma)
+ stm32f7_i2c_dma_callback(i2c_dev);
f7_msg->result = -ENXIO;
}
- /* STOP detection flag */
|
|
- if (status & STM32F7_I2C_ISR_STOPF) {
|
|
- /* Disable interrupts */
|
|
- if (stm32f7_i2c_is_slave_registered(i2c_dev))
|
|
- mask = STM32F7_I2C_XFER_IRQ_MASK;
|
|
+ if (status & STM32F7_I2C_ISR_TCR) {
|
|
+ if (f7_msg->smbus)
|
|
+ stm32f7_i2c_smbus_reload(i2c_dev);
|
|
else
|
|
- mask = STM32F7_I2C_ALL_IRQ_MASK;
|
|
- stm32f7_i2c_disable_irq(i2c_dev, mask);
|
|
-
|
|
- /* Clear STOP flag */
|
|
- writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
|
|
-
|
|
- if (i2c_dev->use_dma && !f7_msg->result) {
|
|
- ret = IRQ_WAKE_THREAD;
|
|
- } else {
|
|
- i2c_dev->master_mode = false;
|
|
- complete(&i2c_dev->complete);
|
|
- }
|
|
+ stm32f7_i2c_reload(i2c_dev);
|
|
}
|
|
|
|
/* Transfer complete */
|
|
if (status & STM32F7_I2C_ISR_TC) {
|
|
+ /* Wait for dma transfer completion before sending next message */
|
|
+ if (i2c_dev->use_dma && !f7_msg->result) {
|
|
+ ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
|
|
+ if (!ret) {
|
|
+ dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
|
|
+ stm32f7_i2c_dma_callback(i2c_dev);
|
|
+ f7_msg->result = -ETIMEDOUT;
|
|
+ }
|
|
+ }
|
|
if (f7_msg->stop) {
|
|
mask = STM32F7_I2C_CR2_STOP;
|
|
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
|
|
- } else if (i2c_dev->use_dma && !f7_msg->result) {
|
|
- ret = IRQ_WAKE_THREAD;
|
|
} else if (f7_msg->smbus) {
|
|
stm32f7_i2c_smbus_rep_start(i2c_dev);
|
|
} else {
|
|
@@ -1566,47 +1575,18 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
|
|
}
|
|
}
|
|
|
|
- if (status & STM32F7_I2C_ISR_TCR) {
|
|
- if (f7_msg->smbus)
|
|
- stm32f7_i2c_smbus_reload(i2c_dev);
|
|
+ /* STOP detection flag */
|
|
+ if (status & STM32F7_I2C_ISR_STOPF) {
|
|
+ /* Disable interrupts */
|
|
+ if (stm32f7_i2c_is_slave_registered(i2c_dev))
|
|
+ mask = STM32F7_I2C_XFER_IRQ_MASK;
|
|
else
|
|
- stm32f7_i2c_reload(i2c_dev);
|
|
- }
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
|
|
-{
|
|
- struct stm32f7_i2c_dev *i2c_dev = data;
|
|
- struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
|
|
- struct stm32_i2c_dma *dma = i2c_dev->dma;
|
|
- u32 status;
|
|
- int ret;
|
|
-
|
|
- /*
|
|
- * Wait for dma transfer completion before sending next message or
|
|
- * notity the end of xfer to the client
|
|
- */
|
|
- ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
|
|
- if (!ret) {
|
|
- dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
|
|
- stm32f7_i2c_disable_dma_req(i2c_dev);
|
|
- dmaengine_terminate_async(dma->chan_using);
|
|
- f7_msg->result = -ETIMEDOUT;
|
|
- }
|
|
+ mask = STM32F7_I2C_ALL_IRQ_MASK;
|
|
+ stm32f7_i2c_disable_irq(i2c_dev, mask);
|
|
|
|
- status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
|
|
+ /* Clear STOP flag */
|
|
+ writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
|
|
|
|
- if (status & STM32F7_I2C_ISR_TC) {
|
|
- if (f7_msg->smbus) {
|
|
- stm32f7_i2c_smbus_rep_start(i2c_dev);
|
|
- } else {
|
|
- i2c_dev->msg_id++;
|
|
- i2c_dev->msg++;
|
|
- stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg);
|
|
- }
|
|
- } else {
|
|
i2c_dev->master_mode = false;
|
|
complete(&i2c_dev->complete);
|
|
}
|
|
@@ -1614,21 +1594,20 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
|
|
return IRQ_HANDLED;
|
|
}
|
|
|
|
-static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
|
|
+static irqreturn_t stm32f7_i2c_isr_error_thread(int irq, void *data)
|
|
{
|
|
struct stm32f7_i2c_dev *i2c_dev = data;
|
|
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
|
|
+ u16 addr = f7_msg->addr;
|
|
void __iomem *base = i2c_dev->base;
|
|
struct device *dev = i2c_dev->dev;
|
|
- struct stm32_i2c_dma *dma = i2c_dev->dma;
|
|
u32 status;
|
|
|
|
status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
|
|
|
|
/* Bus error */
|
|
if (status & STM32F7_I2C_ISR_BERR) {
|
|
- dev_err(dev, "<%s>: Bus error accessing addr 0x%x\n",
|
|
- __func__, f7_msg->addr);
|
|
+ dev_err(dev, "Bus error accessing addr 0x%x\n", addr);
|
|
writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
|
|
stm32f7_i2c_release_bus(&i2c_dev->adap);
|
|
f7_msg->result = -EIO;
|
|
@@ -1636,21 +1615,19 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
|
|
|
|
/* Arbitration loss */
|
|
if (status & STM32F7_I2C_ISR_ARLO) {
|
|
- dev_dbg(dev, "<%s>: Arbitration loss accessing addr 0x%x\n",
|
|
- __func__, f7_msg->addr);
|
|
+ dev_dbg(dev, "Arbitration loss accessing addr 0x%x\n", addr);
|
|
writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
|
|
f7_msg->result = -EAGAIN;
|
|
}
|
|
|
|
if (status & STM32F7_I2C_ISR_PECERR) {
|
|
- dev_err(dev, "<%s>: PEC error in reception accessing addr 0x%x\n",
|
|
- __func__, f7_msg->addr);
|
|
+ dev_err(dev, "PEC error in reception accessing addr 0x%x\n", addr);
|
|
writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR);
|
|
f7_msg->result = -EINVAL;
|
|
}
|
|
|
|
if (status & STM32F7_I2C_ISR_ALERT) {
|
|
- dev_dbg(dev, "<%s>: SMBus alert received\n", __func__);
|
|
+ dev_dbg(dev, "SMBus alert received\n");
|
|
writel_relaxed(STM32F7_I2C_ICR_ALERTCF, base + STM32F7_I2C_ICR);
|
|
i2c_handle_smbus_alert(i2c_dev->alert->ara);
|
|
return IRQ_HANDLED;
|
|
@@ -1667,10 +1644,8 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
|
|
}
|
|
|
|
/* Disable dma */
|
|
- if (i2c_dev->use_dma) {
|
|
- stm32f7_i2c_disable_dma_req(i2c_dev);
|
|
- dmaengine_terminate_async(dma->chan_using);
|
|
- }
|
|
+ if (i2c_dev->use_dma)
|
|
+ stm32f7_i2c_dma_callback(i2c_dev);
|
|
|
|
i2c_dev->master_mode = false;
|
|
complete(&i2c_dev->complete);
|
|
@@ -2177,23 +2152,16 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
|
|
i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
|
|
"wakeup-source");
|
|
|
|
- i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
|
|
+ i2c_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
|
|
if (IS_ERR(i2c_dev->clk))
|
|
return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->clk),
|
|
- "Failed to get controller clock\n");
|
|
-
|
|
- ret = clk_prepare_enable(i2c_dev->clk);
|
|
- if (ret) {
|
|
- dev_err(&pdev->dev, "Failed to prepare_enable clock\n");
|
|
- return ret;
|
|
- }
|
|
+ "Failed to enable controller clock\n");
|
|
|
|
rst = devm_reset_control_get(&pdev->dev, NULL);
|
|
- if (IS_ERR(rst)) {
|
|
- ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
|
|
- "Error: Missing reset ctrl\n");
|
|
- goto clk_free;
|
|
- }
|
|
+ if (IS_ERR(rst))
|
|
+ return dev_err_probe(&pdev->dev, PTR_ERR(rst),
|
|
+ "Error: Missing reset ctrl\n");
|
|
+
|
|
reset_control_assert(rst);
|
|
udelay(2);
|
|
reset_control_deassert(rst);
|
|
@@ -2205,40 +2173,36 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
|
|
stm32f7_i2c_isr_event_thread,
|
|
IRQF_ONESHOT,
|
|
pdev->name, i2c_dev);
|
|
- if (ret) {
|
|
- dev_err(&pdev->dev, "Failed to request irq event %i\n",
|
|
- irq_event);
|
|
- goto clk_free;
|
|
- }
|
|
+ if (ret)
|
|
+ return dev_err_probe(&pdev->dev, ret, "Failed to request irq event\n");
|
|
|
|
- ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0,
|
|
- pdev->name, i2c_dev);
|
|
- if (ret) {
|
|
- dev_err(&pdev->dev, "Failed to request irq error %i\n",
|
|
- irq_error);
|
|
- goto clk_free;
|
|
- }
|
|
+ ret = devm_request_threaded_irq(&pdev->dev, irq_error,
|
|
+ NULL,
|
|
+ stm32f7_i2c_isr_error_thread,
|
|
+ IRQF_ONESHOT,
|
|
+ pdev->name, i2c_dev);
|
|
+ if (ret)
|
|
+ return dev_err_probe(&pdev->dev, ret, "Failed to request irq error\n");
|
|
|
|
setup = of_device_get_match_data(&pdev->dev);
|
|
if (!setup) {
|
|
dev_err(&pdev->dev, "Can't get device data\n");
|
|
- ret = -ENODEV;
|
|
- goto clk_free;
|
|
+ return -ENODEV;
|
|
}
|
|
i2c_dev->setup = *setup;
|
|
|
|
ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
|
|
if (ret)
|
|
- goto clk_free;
|
|
+ return ret;
|
|
|
|
/* Setup Fast mode plus if necessary */
|
|
if (i2c_dev->bus_rate > I2C_MAX_FAST_MODE_FREQ) {
|
|
ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
|
|
if (ret)
|
|
- goto clk_free;
|
|
+ return ret;
|
|
ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
|
|
if (ret)
|
|
- goto clk_free;
|
|
+ return ret;
|
|
}
|
|
|
|
adap = &i2c_dev->adap;
|
|
@@ -2349,9 +2313,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
|
|
fmp_clear:
|
|
stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
|
|
|
|
-clk_free:
|
|
- clk_disable_unprepare(i2c_dev->clk);
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
@@ -2385,8 +2346,6 @@ static void stm32f7_i2c_remove(struct platform_device *pdev)
|
|
}
|
|
|
|
stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
|
|
-
|
|
- clk_disable_unprepare(i2c_dev->clk);
|
|
}
|
|
|
|
static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
|
|
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
|
|
index b010c4209ea381..29ad2f5ffabe20 100644
|
|
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
|
|
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
|
|
@@ -585,7 +585,8 @@ static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
|
|
static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
|
|
struct erdma_mtt *mtt)
|
|
{
|
|
- dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
|
|
+ dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
|
|
+ DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
|
|
vfree(mtt->sglist);
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
index 9d23d4b5c1285f..4a10b826d15a38 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
|
|
@@ -5192,11 +5192,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
|
|
{
|
|
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
|
|
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
|
|
- struct hns_roce_v2_qp_context ctx[2];
|
|
- struct hns_roce_v2_qp_context *context = ctx;
|
|
- struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
|
|
+ struct hns_roce_v2_qp_context *context;
|
|
+ struct hns_roce_v2_qp_context *qpc_mask;
|
|
struct ib_device *ibdev = &hr_dev->ib_dev;
|
|
- int ret;
|
|
+ int ret = -ENOMEM;
|
|
|
|
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
|
|
return -EOPNOTSUPP;
|
|
@@ -5207,7 +5206,11 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
|
|
* we should set all bits of the relevant fields in context mask to
|
|
* 0 at the same time, else set them to 0x1.
|
|
*/
|
|
- memset(context, 0, hr_dev->caps.qpc_sz);
|
|
+ context = kvzalloc(sizeof(*context), GFP_KERNEL);
|
|
+ qpc_mask = kvzalloc(sizeof(*qpc_mask), GFP_KERNEL);
|
|
+ if (!context || !qpc_mask)
|
|
+ goto out;
|
|
+
|
|
memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
|
|
|
|
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
|
|
@@ -5249,6 +5252,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
|
|
clear_qp(hr_qp);
|
|
|
|
out:
|
|
+ kvfree(qpc_mask);
|
|
+ kvfree(context);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
|
|
index 3669c90b2dadc6..672e5cfd2fca5d 100644
|
|
--- a/drivers/infiniband/hw/mlx5/dm.c
|
|
+++ b/drivers/infiniband/hw/mlx5/dm.c
|
|
@@ -282,7 +282,7 @@ static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
|
|
int err;
|
|
u64 address;
|
|
|
|
- if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
|
|
+ if (!dm_db || !MLX5_CAP_DEV_MEM(dm_db->dev, memic))
|
|
return ERR_PTR(-EOPNOTSUPP);
|
|
|
|
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
|
|
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
|
|
index a741badaa966e0..4dd1d2f2e82162 100644
|
|
--- a/drivers/interconnect/qcom/sc8180x.c
|
|
+++ b/drivers/interconnect/qcom/sc8180x.c
|
|
@@ -1492,34 +1492,40 @@ static struct qcom_icc_bcm bcm_sh3 = {
|
|
|
|
static struct qcom_icc_bcm bcm_sn0 = {
|
|
.name = "SN0",
|
|
+ .num_nodes = 1,
|
|
.nodes = { &slv_qns_gemnoc_sf }
|
|
};
|
|
|
|
static struct qcom_icc_bcm bcm_sn1 = {
|
|
.name = "SN1",
|
|
+ .num_nodes = 1,
|
|
.nodes = { &slv_qxs_imem }
|
|
};
|
|
|
|
static struct qcom_icc_bcm bcm_sn2 = {
|
|
.name = "SN2",
|
|
.keepalive = true,
|
|
+ .num_nodes = 1,
|
|
.nodes = { &slv_qns_gemnoc_gc }
|
|
};
|
|
|
|
static struct qcom_icc_bcm bcm_co2 = {
|
|
.name = "CO2",
|
|
+ .num_nodes = 1,
|
|
.nodes = { &mas_qnm_npu }
|
|
};
|
|
|
|
static struct qcom_icc_bcm bcm_sn3 = {
|
|
.name = "SN3",
|
|
.keepalive = true,
|
|
+ .num_nodes = 2,
|
|
.nodes = { &slv_srvc_aggre1_noc,
|
|
&slv_qns_cnoc }
|
|
};
|
|
|
|
static struct qcom_icc_bcm bcm_sn4 = {
|
|
.name = "SN4",
|
|
+ .num_nodes = 1,
|
|
.nodes = { &slv_qxs_pimem }
|
|
};
|
|
|
|
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
|
|
index 0270f6c64481a9..c646cdf8a19bf6 100644
|
|
--- a/drivers/interconnect/qcom/sc8280xp.c
|
|
+++ b/drivers/interconnect/qcom/sc8280xp.c
|
|
@@ -48,6 +48,7 @@ static struct qcom_icc_node qnm_a1noc_cfg = {
|
|
.id = SC8280XP_MASTER_A1NOC_CFG,
|
|
.channels = 1,
|
|
.buswidth = 4,
|
|
+ .num_links = 1,
|
|
.links = { SC8280XP_SLAVE_SERVICE_A1NOC },
|
|
};
|
|
|
|
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
|
|
index a5d6d786dba523..23cfb98fe90a7b 100644
|
|
--- a/drivers/iommu/amd/iommu.c
|
|
+++ b/drivers/iommu/amd/iommu.c
|
|
@@ -2159,8 +2159,21 @@ static inline u64 dma_max_address(void)
|
|
if (amd_iommu_pgtable == AMD_IOMMU_V1)
|
|
return ~0ULL;
|
|
|
|
- /* V2 with 4/5 level page table */
|
|
- return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
|
|
+ /*
|
|
+ * V2 with 4/5 level page table. Note that "2.2.6.5 AMD64 4-Kbyte Page
|
|
+ * Translation" shows that the V2 table sign extends the top of the
|
|
+ * address space creating a reserved region in the middle of the
|
|
+ * translation, just like the CPU does. Further Vasant says the docs are
|
|
+ * incomplete and this only applies to non-zero PASIDs. If the AMDv2
|
|
+ * page table is assigned to the 0 PASID then there is no sign extension
|
|
+ * check.
|
|
+ *
|
|
+ * Since the IOMMU must have a fixed geometry, and the core code does
|
|
+ * not understand sign extended addressing, we have to chop off the high
|
|
+ * bit to get consistent behavior with attachments of the domain to any
|
|
+ * PASID.
|
|
+ */
|
|
+ return ((1ULL << (PM_LEVEL_SHIFT(amd_iommu_gpt_level) - 1)) - 1);
|
|
}
|
|
|
|
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
|
|
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
|
|
index e7b736800dd023..4ff91df7694783 100644
|
|
--- a/drivers/irqchip/Kconfig
|
|
+++ b/drivers/irqchip/Kconfig
|
|
@@ -483,6 +483,7 @@ config IMX_MU_MSI
|
|
tristate "i.MX MU used as MSI controller"
|
|
depends on OF && HAS_IOMEM
|
|
depends on ARCH_MXC || COMPILE_TEST
|
|
+ depends on ARM || ARM64
|
|
default m if ARCH_MXC
|
|
select IRQ_DOMAIN
|
|
select IRQ_DOMAIN_HIERARCHY
|
|
diff --git a/drivers/md/md.c b/drivers/md/md.c
|
|
index ca7ae3aad2655f..b086cbf2408690 100644
|
|
--- a/drivers/md/md.c
|
|
+++ b/drivers/md/md.c
|
|
@@ -9465,8 +9465,8 @@ void md_check_recovery(struct mddev *mddev)
|
|
* remove disk.
|
|
*/
|
|
rdev_for_each_safe(rdev, tmp, mddev) {
|
|
- if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
|
|
- rdev->raid_disk < 0)
|
|
+ if (rdev->raid_disk < 0 &&
|
|
+ test_and_clear_bit(ClusterRemove, &rdev->flags))
|
|
md_kick_rdev_from_array(rdev);
|
|
}
|
|
}
|
|
@@ -9813,8 +9813,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
|
|
|
|
/* Check for change of roles in the active devices */
|
|
rdev_for_each_safe(rdev2, tmp, mddev) {
|
|
- if (test_bit(Faulty, &rdev2->flags))
|
|
+ if (test_bit(Faulty, &rdev2->flags)) {
|
|
+ if (test_bit(ClusterRemove, &rdev2->flags))
|
|
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
|
|
continue;
|
|
+ }
|
|
|
|
/* Check if the roles changed */
|
|
role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
|
|
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
|
|
index a662fb60f73f42..84fbf4e06cd33c 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
|
|
@@ -894,12 +894,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
|
|
|
|
p_h264_sps->flags &=
|
|
~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
|
|
-
|
|
- if (p_h264_sps->chroma_format_idc < 3)
|
|
- p_h264_sps->flags &=
|
|
- ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
|
|
}
|
|
|
|
+ if (p_h264_sps->chroma_format_idc < 3)
|
|
+ p_h264_sps->flags &=
|
|
+ ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
|
|
+
|
|
if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
|
|
p_h264_sps->flags &=
|
|
~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
|
|
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
|
|
index 8c22064ead3870..f2bd1984609ccc 100644
|
|
--- a/drivers/mtd/ftl.c
|
|
+++ b/drivers/mtd/ftl.c
|
|
@@ -344,7 +344,7 @@ static int erase_xfer(partition_t *part,
|
|
return -ENOMEM;
|
|
|
|
erase->addr = xfer->Offset;
|
|
- erase->len = 1 << part->header.EraseUnitSize;
|
|
+ erase->len = 1ULL << part->header.EraseUnitSize;
|
|
|
|
ret = mtd_erase(part->mbd.mtd, erase);
|
|
if (!ret) {
|
|
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
|
|
index 3f494f7c7ecbdb..d4fd1302008ebd 100644
|
|
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
|
|
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
|
|
@@ -373,7 +373,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
|
|
dma_cookie_t cookie;
|
|
|
|
buf_dma = dma_map_single(nc->dev, buf, len, dir);
|
|
- if (dma_mapping_error(nc->dev, dev_dma)) {
|
|
+ if (dma_mapping_error(nc->dev, buf_dma)) {
|
|
dev_err(nc->dev,
|
|
"Failed to prepare a buffer for DMA access\n");
|
|
goto err;
|
|
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
|
|
index 3c7dee1be21df1..0b402823b619cf 100644
|
|
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
|
|
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
|
|
@@ -143,6 +143,7 @@ struct atmel_pmecc_caps {
|
|
int nstrengths;
|
|
int el_offset;
|
|
bool correct_erased_chunks;
|
|
+ bool clk_ctrl;
|
|
};
|
|
|
|
struct atmel_pmecc {
|
|
@@ -843,6 +844,10 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
|
|
if (IS_ERR(pmecc->regs.errloc))
|
|
return ERR_CAST(pmecc->regs.errloc);
|
|
|
|
+ /* pmecc data setup time */
|
|
+ if (caps->clk_ctrl)
|
|
+ writel(PMECC_CLK_133MHZ, pmecc->regs.base + ATMEL_PMECC_CLK);
|
|
+
|
|
/* Disable all interrupts before registering the PMECC handler. */
|
|
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
|
|
atmel_pmecc_reset(pmecc);
|
|
@@ -896,6 +901,7 @@ static struct atmel_pmecc_caps at91sam9g45_caps = {
|
|
.strengths = atmel_pmecc_strengths,
|
|
.nstrengths = 5,
|
|
.el_offset = 0x8c,
|
|
+ .clk_ctrl = true,
|
|
};
|
|
|
|
static struct atmel_pmecc_caps sama5d4_caps = {
|
|
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
|
|
index 2a95dd63b8c203..f68600ce5bfad4 100644
|
|
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
|
|
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
|
|
@@ -656,9 +656,16 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
|
|
|
|
dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
|
|
mtd->writesize, DMA_TO_DEVICE);
|
|
+ if (dma_mapping_error(nfc->dev, dma_data))
|
|
+ return -ENOMEM;
|
|
+
|
|
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
|
|
ecc->steps * oob_step,
|
|
DMA_TO_DEVICE);
|
|
+ if (dma_mapping_error(nfc->dev, dma_oob)) {
|
|
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize, DMA_TO_DEVICE);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
|
|
reinit_completion(&nfc->done);
|
|
writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
|
|
@@ -772,9 +779,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
|
|
dma_data = dma_map_single(nfc->dev, nfc->page_buf,
|
|
mtd->writesize,
|
|
DMA_FROM_DEVICE);
|
|
+ if (dma_mapping_error(nfc->dev, dma_data))
|
|
+ return -ENOMEM;
|
|
+
|
|
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
|
|
ecc->steps * oob_step,
|
|
DMA_FROM_DEVICE);
|
|
+ if (dma_mapping_error(nfc->dev, dma_oob)) {
|
|
+ dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
|
|
+ DMA_FROM_DEVICE);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
|
|
/*
|
|
* The first blocks (4, 8 or 16 depending on the device)
|
|
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
|
|
index 73b448cd00f29f..0b74b79b08f0a2 100644
|
|
--- a/drivers/net/can/kvaser_pciefd.c
|
|
+++ b/drivers/net/can/kvaser_pciefd.c
|
|
@@ -927,6 +927,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
|
|
can->err_rep_cnt = 0;
|
|
can->bec.txerr = 0;
|
|
can->bec.rxerr = 0;
|
|
+ can->can.dev->dev_port = i;
|
|
|
|
init_completion(&can->start_comp);
|
|
init_completion(&can->flush_comp);
|
|
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
|
|
index 15f28b6fe758ee..022b5b79247c5d 100644
|
|
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
|
|
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
|
|
@@ -856,6 +856,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
|
|
}
|
|
SET_NETDEV_DEV(netdev, &dev->intf->dev);
|
|
netdev->dev_id = channel;
|
|
+ netdev->dev_port = channel;
|
|
|
|
dev->nets[channel] = priv;
|
|
|
|
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
|
|
index 4d85b29a17b787..ebefc274b50a5f 100644
|
|
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
|
|
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
|
|
@@ -49,7 +49,7 @@ struct __packed pcan_ufd_fw_info {
|
|
__le32 ser_no; /* S/N */
|
|
__le32 flags; /* special functions */
|
|
|
|
- /* extended data when type == PCAN_USBFD_TYPE_EXT */
|
|
+ /* extended data when type >= PCAN_USBFD_TYPE_EXT */
|
|
u8 cmd_out_ep; /* ep for cmd */
|
|
u8 cmd_in_ep; /* ep for replies */
|
|
u8 data_out_ep[2]; /* ep for CANx TX */
|
|
@@ -982,10 +982,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
|
|
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
|
|
}
|
|
|
|
- /* if vendor rsp is of type 2, then it contains EP numbers to
|
|
- * use for cmds pipes. If not, then default EP should be used.
|
|
+ /* if vendor rsp type is greater than or equal to 2, then it
|
|
+ * contains EP numbers to use for cmds pipes. If not, then
|
|
+ * default EP should be used.
|
|
*/
|
|
- if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
|
|
+ if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) {
|
|
fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
|
|
fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
|
|
}
|
|
@@ -1018,11 +1019,11 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
|
|
dev->can_channel_id =
|
|
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
|
|
|
|
- /* if vendor rsp is of type 2, then it contains EP numbers to
|
|
- * use for data pipes. If not, then statically defined EP are used
|
|
- * (see peak_usb_create_dev()).
|
|
+ /* if vendor rsp type is greater than or equal to 2, then it contains EP
|
|
+ * numbers to use for data pipes. If not, then statically defined EP are
|
|
+ * used (see peak_usb_create_dev()).
|
|
*/
|
|
- if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
|
|
+ if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) {
|
|
dev->ep_msg_in = fw_info->data_in_ep;
|
|
dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
|
|
}
|
|
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
|
|
index a89aa4ac0a064a..779f1324bb5f82 100644
|
|
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
|
|
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
|
|
@@ -3852,8 +3852,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
|
|
status = be_mcc_notify_wait(adapter);
|
|
|
|
err:
|
|
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
|
|
spin_unlock_bh(&adapter->mcc_lock);
|
|
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
|
|
return status;
|
|
}
|
|
|
|
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
|
|
index 6119a410883815..65a2816142d962 100644
|
|
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
|
|
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
|
|
@@ -189,13 +189,14 @@ struct fm10k_q_vector {
|
|
struct fm10k_ring_container rx, tx;
|
|
|
|
struct napi_struct napi;
|
|
+ struct rcu_head rcu; /* to avoid race with update stats on free */
|
|
+
|
|
cpumask_t affinity_mask;
|
|
char name[IFNAMSIZ + 9];
|
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
struct dentry *dbg_q_vector;
|
|
#endif /* CONFIG_DEBUG_FS */
|
|
- struct rcu_head rcu; /* to avoid race with update stats on free */
|
|
|
|
/* for dynamic allocation of rings associated with this q_vector */
|
|
struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
|
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
|
|
index 68f403dd2f5267..9fb7c5fe05d15f 100644
|
|
--- a/drivers/net/ethernet/intel/i40e/i40e.h
|
|
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
|
|
@@ -953,6 +953,7 @@ struct i40e_q_vector {
|
|
u16 reg_idx; /* register index of the interrupt */
|
|
|
|
struct napi_struct napi;
|
|
+ struct rcu_head rcu; /* to avoid race with update stats on free */
|
|
|
|
struct i40e_ring_container rx;
|
|
struct i40e_ring_container tx;
|
|
@@ -963,7 +964,6 @@ struct i40e_q_vector {
|
|
cpumask_t affinity_mask;
|
|
struct irq_affinity_notify affinity_notify;
|
|
|
|
- struct rcu_head rcu; /* to avoid race with update stats on free */
|
|
char name[I40E_INT_NAME_STR_LEN];
|
|
bool arm_wb_state;
|
|
bool in_busy_poll;
|
|
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
|
|
index b6f0376e42f4b5..d15182657cead9 100644
|
|
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
|
|
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
|
|
@@ -503,9 +503,10 @@ struct ixgbe_q_vector {
|
|
struct ixgbe_ring_container rx, tx;
|
|
|
|
struct napi_struct napi;
|
|
+ struct rcu_head rcu; /* to avoid race with update stats on free */
|
|
+
|
|
cpumask_t affinity_mask;
|
|
int numa_node;
|
|
- struct rcu_head rcu; /* to avoid race with update stats on free */
|
|
char name[IFNAMSIZ + 9];
|
|
|
|
/* for dynamic allocation of rings associated with this q_vector */
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
|
|
index 8e25f4ef5cccee..5ae787656a7ca0 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
|
|
@@ -331,6 +331,9 @@ static int port_set_buffer(struct mlx5e_priv *priv,
|
|
if (err)
|
|
goto out;
|
|
|
|
+ /* RO bits should be set to 0 on write */
|
|
+ MLX5_SET(pbmc_reg, in, port_buffer_size, 0);
|
|
+
|
|
err = mlx5e_port_set_pbmc(mdev, in);
|
|
out:
|
|
kfree(in);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
|
|
index 51a144246ea623..f96e9bbf8fc69c 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
|
|
@@ -333,6 +333,10 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
|
|
if (unlikely(!sa_entry)) {
|
|
rcu_read_unlock();
|
|
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
|
|
+ /* Clear secpath to prevent invalid dereference
|
|
+ * in downstream XFRM policy checks.
|
|
+ */
|
|
+ secpath_reset(skb);
|
|
return;
|
|
}
|
|
xfrm_state_hold(sa_entry->x);
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
|
|
index d5731f7be04fd1..8278395ee20a01 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
|
|
@@ -1573,6 +1573,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
|
|
unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
|
|
|
|
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
|
|
+ skb_shinfo(skb)->gso_segs = lro_num_seg;
|
|
/* Subtract one since we already counted this as one
|
|
* "regular" packet in mlx5e_complete_rx_cqe()
|
|
*/
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
|
|
index 9482e51ac82a58..bdbbfaf504d988 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
|
|
@@ -28,7 +28,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
|
|
|
|
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
|
|
if (!dm)
|
|
- return ERR_PTR(-ENOMEM);
|
|
+ return NULL;
|
|
|
|
spin_lock_init(&dm->lock);
|
|
|
|
@@ -80,7 +80,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
|
|
err_steering:
|
|
kfree(dm);
|
|
|
|
- return ERR_PTR(-ENOMEM);
|
|
+ return NULL;
|
|
}
|
|
|
|
void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
index 32fa789a696056..62a85f09b52fd7 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
|
|
@@ -1055,9 +1055,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
|
|
}
|
|
|
|
dev->dm = mlx5_dm_create(dev);
|
|
- if (IS_ERR(dev->dm))
|
|
- mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
|
|
-
|
|
dev->tracer = mlx5_fw_tracer_create(dev);
|
|
dev->hv_vhca = mlx5_hv_vhca_create(dev);
|
|
dev->rsc_dump = mlx5_rsc_dump_create(dev);
|
|
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
|
|
index 2ff09ce343b73a..2e676b9d4042de 100644
|
|
--- a/drivers/net/ipa/ipa_sysfs.c
|
|
+++ b/drivers/net/ipa/ipa_sysfs.c
|
|
@@ -38,8 +38,12 @@ static const char *ipa_version_string(struct ipa *ipa)
|
|
return "4.11";
|
|
case IPA_VERSION_5_0:
|
|
return "5.0";
|
|
+ case IPA_VERSION_5_1:
|
|
+ return "5.1";
|
|
+ case IPA_VERSION_5_5:
|
|
+ return "5.5";
|
|
default:
|
|
- return "0.0"; /* Won't happen (checked at probe time) */
|
|
+ return "0.0"; /* Should not happen */
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
|
|
index 7e7ce79eadffb9..d0bd6ab45ebed7 100644
|
|
--- a/drivers/net/phy/mscc/mscc_ptp.c
|
|
+++ b/drivers/net/phy/mscc/mscc_ptp.c
|
|
@@ -897,6 +897,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
|
|
get_unaligned_be32(ptp_multicast));
|
|
} else {
|
|
val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
|
|
+ val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
|
|
vsc85xx_ts_write_csr(phydev, blk,
|
|
MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
|
|
vsc85xx_ts_write_csr(phydev, blk,
|
|
diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
|
|
index da3465360e9018..ae9ad925bfa8c0 100644
|
|
--- a/drivers/net/phy/mscc/mscc_ptp.h
|
|
+++ b/drivers/net/phy/mscc/mscc_ptp.h
|
|
@@ -98,6 +98,7 @@
|
|
#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
|
|
#define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK GENMASK(22, 20)
|
|
#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST 0x400000
|
|
+#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST 0x200000
|
|
#define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR 0x100000
|
|
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK GENMASK(17, 16)
|
|
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST 0x020000
|
|
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
|
|
index 6833ef0c79305f..3a10303eb756a8 100644
|
|
--- a/drivers/net/ppp/pptp.c
|
|
+++ b/drivers/net/ppp/pptp.c
|
|
@@ -159,19 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
|
|
int len;
|
|
unsigned char *data;
|
|
__u32 seq_recv;
|
|
-
|
|
-
|
|
struct rtable *rt;
|
|
struct net_device *tdev;
|
|
struct iphdr *iph;
|
|
int max_headroom;
|
|
|
|
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
|
|
- goto tx_error;
|
|
+ goto tx_drop;
|
|
|
|
rt = pptp_route_output(po, &fl4);
|
|
if (IS_ERR(rt))
|
|
- goto tx_error;
|
|
+ goto tx_drop;
|
|
|
|
tdev = rt->dst.dev;
|
|
|
|
@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
|
|
|
|
if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
|
|
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
|
|
- if (!new_skb) {
|
|
- ip_rt_put(rt);
|
|
+
|
|
+ if (!new_skb)
|
|
goto tx_error;
|
|
- }
|
|
+
|
|
if (skb->sk)
|
|
skb_set_owner_w(new_skb, skb->sk);
|
|
consume_skb(skb);
|
|
skb = new_skb;
|
|
}
|
|
|
|
+ /* Ensure we can safely access protocol field and LCP code */
|
|
+ if (!pskb_may_pull(skb, 3))
|
|
+ goto tx_error;
|
|
+
|
|
data = skb->data;
|
|
islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
|
|
|
|
@@ -262,6 +264,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
|
|
return 1;
|
|
|
|
tx_error:
|
|
+ ip_rt_put(rt);
|
|
+tx_drop:
|
|
kfree_skb(skb);
|
|
return 1;
|
|
}
|
|
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
|
|
index ac0458b96738c1..fd6b5865ac5135 100644
|
|
--- a/drivers/net/usb/usbnet.c
|
|
+++ b/drivers/net/usb/usbnet.c
|
|
@@ -1113,6 +1113,9 @@ static void __handle_link_change(struct usbnet *dev)
|
|
if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
|
|
return;
|
|
|
|
+ if (test_and_clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags))
|
|
+ netif_carrier_on(dev->net);
|
|
+
|
|
if (!netif_carrier_ok(dev->net)) {
|
|
/* kill URBs for reading packets to save bus bandwidth */
|
|
unlink_urbs(dev, &dev->rxq);
|
|
@@ -2015,10 +2018,12 @@ EXPORT_SYMBOL(usbnet_manage_power);
|
|
void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
|
|
{
|
|
/* update link after link is reseted */
|
|
- if (link && !need_reset)
|
|
- netif_carrier_on(dev->net);
|
|
- else
|
|
+ if (link && !need_reset) {
|
|
+ set_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
|
|
+ } else {
|
|
+ clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags);
|
|
netif_carrier_off(dev->net);
|
|
+ }
|
|
|
|
if (need_reset && link)
|
|
usbnet_defer_kevent(dev, EVENT_LINK_RESET);
|
|
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
|
|
index 5968a3ab817707..64114e98d75d0e 100644
|
|
--- a/drivers/net/vrf.c
|
|
+++ b/drivers/net/vrf.c
|
|
@@ -1345,6 +1345,8 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
|
|
struct net *net = dev_net(vrf_dev);
|
|
struct rt6_info *rt6;
|
|
|
|
+ skb_dst_drop(skb);
|
|
+
|
|
rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
|
|
RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
|
|
if (unlikely(!rt6))
|
|
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
|
|
index f32fa104ded9de..df493d1760623e 100644
|
|
--- a/drivers/net/wireless/ath/ath11k/hal.c
|
|
+++ b/drivers/net/wireless/ath/ath11k/hal.c
|
|
@@ -1319,6 +1319,10 @@ EXPORT_SYMBOL(ath11k_hal_srng_init);
|
|
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
|
|
{
|
|
struct ath11k_hal *hal = &ab->hal;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++)
|
|
+ ab->hal.srng_list[i].initialized = 0;
|
|
|
|
ath11k_hal_unregister_srng_key(ab);
|
|
ath11k_hal_free_cont_rdp(ab);
|
|
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
|
|
index 958ac4ed5c3491..e918218ce2d607 100644
|
|
--- a/drivers/net/wireless/ath/ath12k/wmi.c
|
|
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
|
|
@@ -6303,7 +6303,7 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
|
|
void *data)
|
|
{
|
|
const struct wmi_service_available_event *ev;
|
|
- u32 *wmi_ext2_service_bitmap;
|
|
+ __le32 *wmi_ext2_service_bitmap;
|
|
int i, j;
|
|
u16 expected_len;
|
|
|
|
@@ -6335,12 +6335,12 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
|
|
ev->wmi_service_segment_bitmap[3]);
|
|
break;
|
|
case WMI_TAG_ARRAY_UINT32:
|
|
- wmi_ext2_service_bitmap = (u32 *)ptr;
|
|
+ wmi_ext2_service_bitmap = (__le32 *)ptr;
|
|
for (i = 0, j = WMI_MAX_EXT_SERVICE;
|
|
i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
|
|
i++) {
|
|
do {
|
|
- if (wmi_ext2_service_bitmap[i] &
|
|
+ if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) &
|
|
BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
|
|
set_bit(j, ab->wmi_ab.svc_map);
|
|
} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
|
|
@@ -6348,8 +6348,10 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
|
|
|
|
ath12k_dbg(ab, ATH12K_DBG_WMI,
|
|
"wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
|
|
- wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
|
|
- wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
|
|
+ __le32_to_cpu(wmi_ext2_service_bitmap[0]),
|
|
+ __le32_to_cpu(wmi_ext2_service_bitmap[1]),
|
|
+ __le32_to_cpu(wmi_ext2_service_bitmap[2]),
|
|
+ __le32_to_cpu(wmi_ext2_service_bitmap[3]));
|
|
break;
|
|
}
|
|
return 0;
|
|
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
index c708ae91c3ce93..e883cf80f506d1 100644
|
|
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
|
|
@@ -1541,10 +1541,6 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
|
|
return -EAGAIN;
|
|
}
|
|
|
|
- /* If scan req comes for p2p0, send it over primary I/F */
|
|
- if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
|
|
- vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
|
|
-
|
|
brcmf_dbg(SCAN, "START ESCAN\n");
|
|
|
|
cfg->scan_request = request;
|
|
@@ -1560,6 +1556,10 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
|
|
if (err)
|
|
goto scan_out;
|
|
|
|
+ /* If scan req comes for p2p0, send it over primary I/F */
|
|
+ if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
|
|
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
|
|
+
|
|
err = brcmf_do_escan(vif->ifp, request);
|
|
if (err)
|
|
goto scan_out;
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
|
|
index a873be109f4399..b490a88b97ca75 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
|
|
@@ -1048,9 +1048,11 @@ static void iwl_bg_restart(struct work_struct *data)
|
|
*
|
|
*****************************************************************************/
|
|
|
|
-static void iwl_setup_deferred_work(struct iwl_priv *priv)
|
|
+static int iwl_setup_deferred_work(struct iwl_priv *priv)
|
|
{
|
|
priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
|
|
+ if (!priv->workqueue)
|
|
+ return -ENOMEM;
|
|
|
|
INIT_WORK(&priv->restart, iwl_bg_restart);
|
|
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
|
|
@@ -1067,6 +1069,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
|
|
timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
|
|
|
|
timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
void iwl_cancel_deferred_work(struct iwl_priv *priv)
|
|
@@ -1456,7 +1460,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
|
|
/********************
|
|
* 6. Setup services
|
|
********************/
|
|
- iwl_setup_deferred_work(priv);
|
|
+ if (iwl_setup_deferred_work(priv))
|
|
+ goto out_uninit_drv;
|
|
+
|
|
iwl_setup_rx_handlers(priv);
|
|
|
|
iwl_power_initialize(priv);
|
|
@@ -1494,6 +1500,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
|
|
iwl_cancel_deferred_work(priv);
|
|
destroy_workqueue(priv->workqueue);
|
|
priv->workqueue = NULL;
|
|
+out_uninit_drv:
|
|
iwl_uninit_drv(priv);
|
|
out_free_eeprom_blob:
|
|
kfree(priv->eeprom_blob);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
index b2cf5aeff7e3cf..d2dbbc9fe38448 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
@@ -65,8 +65,10 @@ static int __init iwl_mvm_init(void)
|
|
}
|
|
|
|
ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
pr_err("Unable to register MVM op_mode: %d\n", ret);
|
|
+ iwl_mvm_rate_control_unregister();
|
|
+ }
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
|
|
index c0ecd769ada764..1a48914e6f828a 100644
|
|
--- a/drivers/net/wireless/marvell/mwl8k.c
|
|
+++ b/drivers/net/wireless/marvell/mwl8k.c
|
|
@@ -1222,6 +1222,10 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
|
|
|
|
addr = dma_map_single(&priv->pdev->dev, skb->data,
|
|
MWL8K_RX_MAXSZ, DMA_FROM_DEVICE);
|
|
+ if (dma_mapping_error(&priv->pdev->dev, addr)) {
|
|
+ kfree_skb(skb);
|
|
+ break;
|
|
+ }
|
|
|
|
rxq->rxd_count++;
|
|
rx = rxq->tail++;
|
|
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
|
|
index 7ebc0df0944cb5..f9f7532c32fde9 100644
|
|
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
|
|
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
|
|
@@ -100,11 +100,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
|
|
return r;
|
|
}
|
|
|
|
-void plfxlc_mac_release(struct plfxlc_mac *mac)
|
|
-{
|
|
- plfxlc_chip_release(&mac->chip);
|
|
-}
|
|
-
|
|
int plfxlc_op_start(struct ieee80211_hw *hw)
|
|
{
|
|
plfxlc_hw_mac(hw)->chip.usb.initialized = 1;
|
|
@@ -752,3 +747,9 @@ struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf)
|
|
SET_IEEE80211_DEV(hw, &intf->dev);
|
|
return hw;
|
|
}
|
|
+
|
|
+void plfxlc_mac_release_hw(struct ieee80211_hw *hw)
|
|
+{
|
|
+ plfxlc_chip_release(&plfxlc_hw_mac(hw)->chip);
|
|
+ ieee80211_free_hw(hw);
|
|
+}
|
|
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h
|
|
index 49b92413729bfa..c0445932b2e8a5 100644
|
|
--- a/drivers/net/wireless/purelifi/plfxlc/mac.h
|
|
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.h
|
|
@@ -168,7 +168,7 @@ static inline u8 *plfxlc_mac_get_perm_addr(struct plfxlc_mac *mac)
|
|
}
|
|
|
|
struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf);
|
|
-void plfxlc_mac_release(struct plfxlc_mac *mac);
|
|
+void plfxlc_mac_release_hw(struct ieee80211_hw *hw);
|
|
|
|
int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address);
|
|
int plfxlc_mac_init_hw(struct ieee80211_hw *hw);
|
|
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
|
|
index 8151bc5e00ccc8..901e0139969e8d 100644
|
|
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
|
|
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
|
|
@@ -604,7 +604,7 @@ static int probe(struct usb_interface *intf,
|
|
r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number);
|
|
if (r) {
|
|
dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_free_hw;
|
|
}
|
|
|
|
chip->unit_type = STA;
|
|
@@ -613,13 +613,13 @@ static int probe(struct usb_interface *intf,
|
|
r = plfxlc_mac_preinit_hw(hw, hw_address);
|
|
if (r) {
|
|
dev_err(&intf->dev, "Init mac failed (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_free_hw;
|
|
}
|
|
|
|
r = ieee80211_register_hw(hw);
|
|
if (r) {
|
|
dev_err(&intf->dev, "Register device failed (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_free_hw;
|
|
}
|
|
|
|
if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) ==
|
|
@@ -632,7 +632,7 @@ static int probe(struct usb_interface *intf,
|
|
}
|
|
if (r != 0) {
|
|
dev_err(&intf->dev, "FPGA download failed (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_unreg_hw;
|
|
}
|
|
|
|
tx->mac_fifo_full = 0;
|
|
@@ -642,21 +642,21 @@ static int probe(struct usb_interface *intf,
|
|
r = plfxlc_usb_init_hw(usb);
|
|
if (r < 0) {
|
|
dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_unreg_hw;
|
|
}
|
|
|
|
msleep(PLF_MSLEEP_TIME);
|
|
r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON);
|
|
if (r < 0) {
|
|
dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_unreg_hw;
|
|
}
|
|
|
|
msleep(PLF_MSLEEP_TIME);
|
|
r = plfxlc_chip_set_rate(chip, 8);
|
|
if (r < 0) {
|
|
dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_unreg_hw;
|
|
}
|
|
|
|
msleep(PLF_MSLEEP_TIME);
|
|
@@ -664,7 +664,7 @@ static int probe(struct usb_interface *intf,
|
|
hw_address, ETH_ALEN, USB_REQ_MAC_WR);
|
|
if (r < 0) {
|
|
dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r);
|
|
- goto error;
|
|
+ goto error_unreg_hw;
|
|
}
|
|
|
|
plfxlc_chip_enable_rxtx(chip);
|
|
@@ -691,12 +691,12 @@ static int probe(struct usb_interface *intf,
|
|
plfxlc_mac_init_hw(hw);
|
|
usb->initialized = true;
|
|
return 0;
|
|
+
|
|
+error_unreg_hw:
|
|
+ ieee80211_unregister_hw(hw);
|
|
+error_free_hw:
|
|
+ plfxlc_mac_release_hw(hw);
|
|
error:
|
|
- if (hw) {
|
|
- plfxlc_mac_release(plfxlc_hw_mac(hw));
|
|
- ieee80211_unregister_hw(hw);
|
|
- ieee80211_free_hw(hw);
|
|
- }
|
|
dev_err(&intf->dev, "pureLifi:Device error");
|
|
return r;
|
|
}
|
|
@@ -730,8 +730,7 @@ static void disconnect(struct usb_interface *intf)
|
|
*/
|
|
usb_reset_device(interface_to_usbdev(intf));
|
|
|
|
- plfxlc_mac_release(mac);
|
|
- ieee80211_free_hw(hw);
|
|
+ plfxlc_mac_release_hw(hw);
|
|
}
|
|
|
|
static void plfxlc_usb_resume(struct plfxlc_usb *usb)
|
|
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
|
|
index 04945f905d6d06..f6528469022bfb 100644
|
|
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
|
|
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
|
|
@@ -1041,10 +1041,11 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
|
|
rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
|
|
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
|
|
|
|
+ usb_kill_anchored_urbs(&priv->anchored);
|
|
+
|
|
while ((skb = skb_dequeue(&priv->b_tx_status.queue)))
|
|
dev_kfree_skb_any(skb);
|
|
|
|
- usb_kill_anchored_urbs(&priv->anchored);
|
|
mutex_unlock(&priv->conf_mutex);
|
|
|
|
if (!priv->is_rtl8187b)
|
|
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
index 05e77d2bda3738..03aacb7a431710 100644
|
|
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
@@ -6501,7 +6501,7 @@ static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
|
|
skb_size = fops->rx_agg_buf_size;
|
|
skb_size += (rx_desc_sz + sizeof(struct rtl8723au_phy_stats));
|
|
} else {
|
|
- skb_size = IEEE80211_MAX_FRAME_LEN;
|
|
+ skb_size = IEEE80211_MAX_FRAME_LEN + rx_desc_sz;
|
|
}
|
|
|
|
skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
|
|
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
|
|
index d1d8fd812cbf6d..21e9ec8768b5dc 100644
|
|
--- a/drivers/net/wireless/realtek/rtw89/core.c
|
|
+++ b/drivers/net/wireless/realtek/rtw89/core.c
|
|
@@ -1597,6 +1597,11 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
|
|
if (rx_status->band != NL80211_BAND_6GHZ)
|
|
return;
|
|
|
|
+ if (unlikely(!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))) {
|
|
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rx on unsupported 6 GHz\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, skb->len);
|
|
|
|
list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
|
|
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
|
|
index afbbdccd195d97..6ff20d58539662 100644
|
|
--- a/drivers/pci/controller/pcie-rockchip-host.c
|
|
+++ b/drivers/pci/controller/pcie-rockchip-host.c
|
|
@@ -436,7 +436,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
|
|
dev_dbg(dev, "malformed TLP received from the link\n");
|
|
|
|
if (sub_reg & PCIE_CORE_INT_UCR)
|
|
- dev_dbg(dev, "malformed TLP received from the link\n");
|
|
+ dev_dbg(dev, "Unexpected Completion received from the link\n");
|
|
|
|
if (sub_reg & PCIE_CORE_INT_FCE)
|
|
dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
|
|
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
|
|
index 3368f483f818df..33c3f9b980e684 100644
|
|
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
|
|
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
|
|
@@ -531,7 +531,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
|
|
struct device *dev = &ntb->epf->dev;
|
|
int ret;
|
|
struct pci_epf_bar *epf_bar;
|
|
- void __iomem *mw_addr;
|
|
+ void *mw_addr;
|
|
enum pci_barno barno;
|
|
size_t size = sizeof(u32) * ntb->db_count;
|
|
|
|
@@ -711,7 +711,7 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
|
|
barno = pci_epc_get_next_free_bar(epc_features, barno);
|
|
if (barno < 0) {
|
|
dev_err(dev, "Fail to get NTB function BAR\n");
|
|
- return barno;
|
|
+ return -ENOENT;
|
|
}
|
|
ntb->epf_ntb_bar[bar] = barno;
|
|
}
|
|
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
|
|
index 092c9ac0d26d27..ec7828ad666179 100644
|
|
--- a/drivers/pci/hotplug/pnv_php.c
|
|
+++ b/drivers/pci/hotplug/pnv_php.c
|
|
@@ -3,11 +3,14 @@
|
|
* PCI Hotplug Driver for PowerPC PowerNV platform.
|
|
*
|
|
* Copyright Gavin Shan, IBM Corporation 2016.
|
|
+ * Copyright (C) 2025 Raptor Engineering, LLC
|
|
+ * Copyright (C) 2025 Raptor Computing Systems, LLC
|
|
*/
|
|
|
|
#include <linux/libfdt.h>
|
|
#include <linux/module.h>
|
|
#include <linux/pci.h>
|
|
+#include <linux/delay.h>
|
|
#include <linux/pci_hotplug.h>
|
|
#include <linux/of_fdt.h>
|
|
|
|
@@ -35,8 +38,10 @@ static void pnv_php_register(struct device_node *dn);
|
|
static void pnv_php_unregister_one(struct device_node *dn);
|
|
static void pnv_php_unregister(struct device_node *dn);
|
|
|
|
+static void pnv_php_enable_irq(struct pnv_php_slot *php_slot);
|
|
+
|
|
static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
|
|
- bool disable_device)
|
|
+ bool disable_device, bool disable_msi)
|
|
{
|
|
struct pci_dev *pdev = php_slot->pdev;
|
|
u16 ctrl;
|
|
@@ -52,19 +57,15 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
|
|
php_slot->irq = 0;
|
|
}
|
|
|
|
- if (php_slot->wq) {
|
|
- destroy_workqueue(php_slot->wq);
|
|
- php_slot->wq = NULL;
|
|
- }
|
|
-
|
|
- if (disable_device) {
|
|
+ if (disable_device || disable_msi) {
|
|
if (pdev->msix_enabled)
|
|
pci_disable_msix(pdev);
|
|
else if (pdev->msi_enabled)
|
|
pci_disable_msi(pdev);
|
|
+ }
|
|
|
|
+ if (disable_device)
|
|
pci_disable_device(pdev);
|
|
- }
|
|
}
|
|
|
|
static void pnv_php_free_slot(struct kref *kref)
|
|
@@ -73,7 +74,8 @@ static void pnv_php_free_slot(struct kref *kref)
|
|
struct pnv_php_slot, kref);
|
|
|
|
WARN_ON(!list_empty(&php_slot->children));
|
|
- pnv_php_disable_irq(php_slot, false);
|
|
+ pnv_php_disable_irq(php_slot, false, false);
|
|
+ destroy_workqueue(php_slot->wq);
|
|
kfree(php_slot->name);
|
|
kfree(php_slot);
|
|
}
|
|
@@ -390,6 +392,20 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
|
|
return 0;
|
|
}
|
|
|
|
+static int pcie_check_link_active(struct pci_dev *pdev)
|
|
+{
|
|
+ u16 lnk_status;
|
|
+ int ret;
|
|
+
|
|
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
|
|
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
|
|
+ return -ENODEV;
|
|
+
|
|
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
|
|
{
|
|
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
|
|
@@ -402,6 +418,19 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
|
|
*/
|
|
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
|
|
if (ret >= 0) {
|
|
+ if (pci_pcie_type(php_slot->pdev) == PCI_EXP_TYPE_DOWNSTREAM &&
|
|
+ presence == OPAL_PCI_SLOT_EMPTY) {
|
|
+ /*
|
|
+ * Similar to pciehp_hpc, check whether the Link Active
|
|
+ * bit is set to account for broken downstream bridges
|
|
+ * that don't properly assert Presence Detect State, as
|
|
+ * was observed on the Microsemi Switchtec PM8533 PFX
|
|
+ * [11f8:8533].
|
|
+ */
|
|
+ if (pcie_check_link_active(php_slot->pdev) > 0)
|
|
+ presence = OPAL_PCI_SLOT_PRESENT;
|
|
+ }
|
|
+
|
|
*state = presence;
|
|
ret = 0;
|
|
} else {
|
|
@@ -441,6 +470,61 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
|
|
return 0;
|
|
}
|
|
|
|
+static int pnv_php_activate_slot(struct pnv_php_slot *php_slot,
|
|
+ struct hotplug_slot *slot)
|
|
+{
|
|
+ int ret, i;
|
|
+
|
|
+ /*
|
|
+ * Issue initial slot activation command to firmware
|
|
+ *
|
|
+ * Firmware will power slot on, attempt to train the link, and
|
|
+ * discover any downstream devices. If this process fails, firmware
|
|
+ * will return an error code and an invalid device tree. Failure
|
|
+ * can be caused for multiple reasons, including a faulty
|
|
+ * downstream device, poor connection to the downstream device, or
|
|
+ * a previously latched PHB fence. On failure, issue fundamental
|
|
+ * reset up to three times before aborting.
|
|
+ */
|
|
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
|
|
+ if (ret) {
|
|
+ SLOT_WARN(
|
|
+ php_slot,
|
|
+ "PCI slot activation failed with error code %d, possible frozen PHB",
|
|
+ ret);
|
|
+ SLOT_WARN(
|
|
+ php_slot,
|
|
+ "Attempting complete PHB reset before retrying slot activation\n");
|
|
+ for (i = 0; i < 3; i++) {
|
|
+ /*
|
|
+ * Slot activation failed, PHB may be fenced from a
|
|
+ * prior device failure.
|
|
+ *
|
|
+ * Use the OPAL fundamental reset call to both try a
|
|
+ * device reset and clear any potentially active PHB
|
|
+ * fence / freeze.
|
|
+ */
|
|
+ SLOT_WARN(php_slot, "Try %d...\n", i + 1);
|
|
+ pci_set_pcie_reset_state(php_slot->pdev,
|
|
+ pcie_warm_reset);
|
|
+ msleep(250);
|
|
+ pci_set_pcie_reset_state(php_slot->pdev,
|
|
+ pcie_deassert_reset);
|
|
+
|
|
+ ret = pnv_php_set_slot_power_state(
|
|
+ slot, OPAL_PCI_SLOT_POWER_ON);
|
|
+ if (!ret)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (i >= 3)
|
|
+ SLOT_WARN(php_slot,
|
|
+ "Failed to bring slot online, aborting!\n");
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
|
|
{
|
|
struct hotplug_slot *slot = &php_slot->slot;
|
|
@@ -503,7 +587,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
|
|
goto scan;
|
|
|
|
/* Power is off, turn it on and then scan the slot */
|
|
- ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
|
|
+ ret = pnv_php_activate_slot(php_slot, slot);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -560,8 +644,58 @@ static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
|
|
static int pnv_php_enable_slot(struct hotplug_slot *slot)
|
|
{
|
|
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
|
|
+ u32 prop32;
|
|
+ int ret;
|
|
+
|
|
+ ret = pnv_php_enable(php_slot, true);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* (Re-)enable interrupt if the slot supports surprise hotplug */
|
|
+ ret = of_property_read_u32(php_slot->dn, "ibm,slot-surprise-pluggable",
|
|
+ &prop32);
|
|
+ if (!ret && prop32)
|
|
+ pnv_php_enable_irq(php_slot);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Disable any hotplug interrupts for all slots on the provided bus, as well as
|
|
+ * all downstream slots in preparation for a hot unplug.
|
|
+ */
|
|
+static int pnv_php_disable_all_irqs(struct pci_bus *bus)
|
|
+{
|
|
+ struct pci_bus *child_bus;
|
|
+ struct pci_slot *slot;
|
|
+
|
|
+ /* First go down child buses */
|
|
+ list_for_each_entry(child_bus, &bus->children, node)
|
|
+ pnv_php_disable_all_irqs(child_bus);
|
|
+
|
|
+ /* Disable IRQs for all pnv_php slots on this bus */
|
|
+ list_for_each_entry(slot, &bus->slots, list) {
|
|
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot->hotplug);
|
|
|
|
- return pnv_php_enable(php_slot, true);
|
|
+ pnv_php_disable_irq(php_slot, false, true);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Disable any hotplug interrupts for all downstream slots on the provided
|
|
+ * bus in preparation for a hot unplug.
|
|
+ */
|
|
+static int pnv_php_disable_all_downstream_irqs(struct pci_bus *bus)
|
|
+{
|
|
+ struct pci_bus *child_bus;
|
|
+
|
|
+ /* Go down child buses, recursively deactivating their IRQs */
|
|
+ list_for_each_entry(child_bus, &bus->children, node)
|
|
+ pnv_php_disable_all_irqs(child_bus);
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
static int pnv_php_disable_slot(struct hotplug_slot *slot)
|
|
@@ -578,6 +712,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
|
|
php_slot->state != PNV_PHP_STATE_REGISTERED)
|
|
return 0;
|
|
|
|
+ /*
|
|
+ * Free all IRQ resources from all child slots before remove.
|
|
+ * Note that we do not disable the root slot IRQ here as that
|
|
+ * would also deactivate the slot hot (re)plug interrupt!
|
|
+ */
|
|
+ pnv_php_disable_all_downstream_irqs(php_slot->bus);
|
|
+
|
|
/* Remove all devices behind the slot */
|
|
pci_lock_rescan_remove();
|
|
pci_hp_remove_devices(php_slot->bus);
|
|
@@ -646,6 +787,15 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
|
|
return NULL;
|
|
}
|
|
|
|
+ /* Allocate workqueue for this slot's interrupt handling */
|
|
+ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
|
|
+ if (!php_slot->wq) {
|
|
+ SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
|
|
+ kfree(php_slot->name);
|
|
+ kfree(php_slot);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
if (dn->child && PCI_DN(dn->child))
|
|
php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
|
|
else
|
|
@@ -744,16 +894,63 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
|
|
return entry.vector;
|
|
}
|
|
|
|
+static void
|
|
+pnv_php_detect_clear_suprise_removal_freeze(struct pnv_php_slot *php_slot)
|
|
+{
|
|
+ struct pci_dev *pdev = php_slot->pdev;
|
|
+ struct eeh_dev *edev;
|
|
+ struct eeh_pe *pe;
|
|
+ int i, rc;
|
|
+
|
|
+ /*
|
|
+ * When a device is surprise removed from a downstream bridge slot,
|
|
+ * the upstream bridge port can still end up frozen due to related EEH
|
|
+ * events, which will in turn block the MSI interrupts for slot hotplug
|
|
+ * detection.
|
|
+ *
|
|
+ * Detect and thaw any frozen upstream PE after slot deactivation.
|
|
+ */
|
|
+ edev = pci_dev_to_eeh_dev(pdev);
|
|
+ pe = edev ? edev->pe : NULL;
|
|
+ rc = eeh_pe_get_state(pe);
|
|
+ if ((rc == -ENODEV) || (rc == -ENOENT)) {
|
|
+ SLOT_WARN(
|
|
+ php_slot,
|
|
+ "Upstream bridge PE state unknown, hotplug detect may fail\n");
|
|
+ } else {
|
|
+ if (pe->state & EEH_PE_ISOLATED) {
|
|
+ SLOT_WARN(
|
|
+ php_slot,
|
|
+ "Upstream bridge PE %02x frozen, thawing...\n",
|
|
+ pe->addr);
|
|
+ for (i = 0; i < 3; i++)
|
|
+ if (!eeh_unfreeze_pe(pe))
|
|
+ break;
|
|
+ if (i >= 3)
|
|
+ SLOT_WARN(
|
|
+ php_slot,
|
|
+ "Unable to thaw PE %02x, hotplug detect will fail!\n",
|
|
+ pe->addr);
|
|
+ else
|
|
+ SLOT_WARN(php_slot,
|
|
+ "PE %02x thawed successfully\n",
|
|
+ pe->addr);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
static void pnv_php_event_handler(struct work_struct *work)
|
|
{
|
|
struct pnv_php_event *event =
|
|
container_of(work, struct pnv_php_event, work);
|
|
struct pnv_php_slot *php_slot = event->php_slot;
|
|
|
|
- if (event->added)
|
|
+ if (event->added) {
|
|
pnv_php_enable_slot(&php_slot->slot);
|
|
- else
|
|
+ } else {
|
|
pnv_php_disable_slot(&php_slot->slot);
|
|
+ pnv_php_detect_clear_suprise_removal_freeze(php_slot);
|
|
+ }
|
|
|
|
kfree(event);
|
|
}
|
|
@@ -842,14 +1039,6 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
|
|
u16 sts, ctrl;
|
|
int ret;
|
|
|
|
- /* Allocate workqueue */
|
|
- php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
|
|
- if (!php_slot->wq) {
|
|
- SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
|
|
- pnv_php_disable_irq(php_slot, true);
|
|
- return;
|
|
- }
|
|
-
|
|
/* Check PDC (Presence Detection Change) is broken or not */
|
|
ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc",
|
|
&broken_pdc);
|
|
@@ -868,7 +1057,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
|
|
ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
|
|
php_slot->name, php_slot);
|
|
if (ret) {
|
|
- pnv_php_disable_irq(php_slot, true);
|
|
+ pnv_php_disable_irq(php_slot, true, true);
|
|
SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
|
|
return;
|
|
}
|
|
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
|
|
index 97e8af88df851b..ab853d6c586b60 100644
|
|
--- a/drivers/pinctrl/pinmux.c
|
|
+++ b/drivers/pinctrl/pinmux.c
|
|
@@ -238,18 +238,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
|
|
if (desc->mux_usecount)
|
|
return NULL;
|
|
}
|
|
- }
|
|
-
|
|
- /*
|
|
- * If there is no kind of request function for the pin we just assume
|
|
- * we got it by default and proceed.
|
|
- */
|
|
- if (gpio_range && ops->gpio_disable_free)
|
|
- ops->gpio_disable_free(pctldev, gpio_range, pin);
|
|
- else if (ops->free)
|
|
- ops->free(pctldev, pin);
|
|
|
|
- scoped_guard(mutex, &desc->mux_lock) {
|
|
if (gpio_range) {
|
|
owner = desc->gpio_owner;
|
|
desc->gpio_owner = NULL;
|
|
@@ -260,6 +249,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
|
|
}
|
|
}
|
|
|
|
+ /*
|
|
+ * If there is no kind of request function for the pin we just assume
|
|
+ * we got it by default and proceed.
|
|
+ */
|
|
+ if (gpio_range && ops->gpio_disable_free)
|
|
+ ops->gpio_disable_free(pctldev, gpio_range, pin);
|
|
+ else if (ops->free)
|
|
+ ops->free(pctldev, pin);
|
|
+
|
|
module_put(pctldev->owner);
|
|
|
|
return owner;
|
|
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
|
|
index 73bcf806af0ec2..fc11c3d55fa8c0 100644
|
|
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
|
|
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
|
|
@@ -395,6 +395,7 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
|
|
const char *function, *pin_prop;
|
|
const char *group;
|
|
int ret, npins, nmaps, configlen = 0, i = 0;
|
|
+ struct pinctrl_map *new_map;
|
|
|
|
*map = NULL;
|
|
*num_maps = 0;
|
|
@@ -469,9 +470,13 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
|
|
 * We now have the number of maps we need, we can resize our
|
|
* map array
|
|
*/
|
|
- *map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
|
|
- if (!*map)
|
|
- return -ENOMEM;
|
|
+ new_map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
|
|
+ if (!new_map) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_free_map;
|
|
+ }
|
|
+
|
|
+ *map = new_map;
|
|
|
|
return 0;
|
|
|
|
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
|
|
index be9764541d52a5..05a3f4b208a420 100644
|
|
--- a/drivers/power/supply/cpcap-charger.c
|
|
+++ b/drivers/power/supply/cpcap-charger.c
|
|
@@ -689,9 +689,8 @@ static void cpcap_usb_detect(struct work_struct *work)
|
|
struct power_supply *battery;
|
|
|
|
battery = power_supply_get_by_name("battery");
|
|
- if (IS_ERR_OR_NULL(battery)) {
|
|
- dev_err(ddata->dev, "battery power_supply not available %li\n",
|
|
- PTR_ERR(battery));
|
|
+ if (!battery) {
|
|
+ dev_err(ddata->dev, "battery power_supply not available\n");
|
|
return;
|
|
}
|
|
|
|
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
|
|
index 96f9de775043ac..daa5a6d41bb48b 100644
|
|
--- a/drivers/power/supply/max14577_charger.c
|
|
+++ b/drivers/power/supply/max14577_charger.c
|
|
@@ -501,7 +501,7 @@ static struct max14577_charger_platform_data *max14577_charger_dt_init(
|
|
static struct max14577_charger_platform_data *max14577_charger_dt_init(
|
|
struct platform_device *pdev)
|
|
{
|
|
- return NULL;
|
|
+ return ERR_PTR(-ENODATA);
|
|
}
|
|
#endif /* CONFIG_OF */
|
|
|
|
@@ -572,7 +572,7 @@ static int max14577_charger_probe(struct platform_device *pdev)
|
|
chg->max14577 = max14577;
|
|
|
|
chg->pdata = max14577_charger_dt_init(pdev);
|
|
- if (IS_ERR_OR_NULL(chg->pdata))
|
|
+ if (IS_ERR(chg->pdata))
|
|
return PTR_ERR(chg->pdata);
|
|
|
|
ret = max14577_charger_reg_init(chg);
|
|
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
|
|
index ae7ee611978ba1..99a82060ead984 100644
|
|
--- a/drivers/powercap/dtpm_cpu.c
|
|
+++ b/drivers/powercap/dtpm_cpu.c
|
|
@@ -93,6 +93,8 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
|
|
int i;
|
|
|
|
pd = em_cpu_get(dtpm_cpu->cpu);
|
|
+ if (!pd)
|
|
+ return 0;
|
|
|
|
pd_mask = em_span_cpus(pd);
|
|
|
|
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
|
|
index 63f96357eb9fd8..e1689957736d95 100644
|
|
--- a/drivers/pps/pps.c
|
|
+++ b/drivers/pps/pps.c
|
|
@@ -41,6 +41,9 @@ static __poll_t pps_cdev_poll(struct file *file, poll_table *wait)
|
|
|
|
poll_wait(file, &pps->queue, wait);
|
|
|
|
+ if (pps->last_fetched_ev == pps->last_ev)
|
|
+ return 0;
|
|
+
|
|
return EPOLLIN | EPOLLRDNORM;
|
|
}
|
|
|
|
@@ -186,9 +189,11 @@ static long pps_cdev_ioctl(struct file *file,
|
|
if (err)
|
|
return err;
|
|
|
|
- /* Return the fetched timestamp */
|
|
+ /* Return the fetched timestamp and save last fetched event */
|
|
spin_lock_irq(&pps->lock);
|
|
|
|
+ pps->last_fetched_ev = pps->last_ev;
|
|
+
|
|
fdata.info.assert_sequence = pps->assert_sequence;
|
|
fdata.info.clear_sequence = pps->clear_sequence;
|
|
fdata.info.assert_tu = pps->assert_tu;
|
|
@@ -272,9 +277,11 @@ static long pps_cdev_compat_ioctl(struct file *file,
|
|
if (err)
|
|
return err;
|
|
|
|
- /* Return the fetched timestamp */
|
|
+ /* Return the fetched timestamp and save last fetched event */
|
|
spin_lock_irq(&pps->lock);
|
|
|
|
+ pps->last_fetched_ev = pps->last_ev;
|
|
+
|
|
compat.info.assert_sequence = pps->assert_sequence;
|
|
compat.info.clear_sequence = pps->clear_sequence;
|
|
compat.info.current_mode = pps->current_mode;
|
|
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
|
|
index 0c78451960926d..e14981383c0125 100644
|
|
--- a/drivers/rtc/rtc-ds1307.c
|
|
+++ b/drivers/rtc/rtc-ds1307.c
|
|
@@ -1461,7 +1461,7 @@ static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
|
|
return ds3231_clk_sqw_rates[i];
|
|
}
|
|
|
|
- return 0;
|
|
+ return ds3231_clk_sqw_rates[ARRAY_SIZE(ds3231_clk_sqw_rates) - 1];
|
|
}
|
|
|
|
static int ds3231_clk_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
|
|
index b018535c842b37..b873918042e33b 100644
|
|
--- a/drivers/rtc/rtc-hym8563.c
|
|
+++ b/drivers/rtc/rtc-hym8563.c
|
|
@@ -294,7 +294,7 @@ static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
|
|
if (clkout_rates[i] <= rate)
|
|
return clkout_rates[i];
|
|
|
|
- return 0;
|
|
+ return clkout_rates[0];
|
|
}
|
|
|
|
static int hym8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
|
|
index c4533c0f538967..5e7a8c7ad58c5f 100644
|
|
--- a/drivers/rtc/rtc-nct3018y.c
|
|
+++ b/drivers/rtc/rtc-nct3018y.c
|
|
@@ -342,7 +342,7 @@ static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
|
|
if (clkout_rates[i] <= rate)
|
|
return clkout_rates[i];
|
|
|
|
- return 0;
|
|
+ return clkout_rates[0];
|
|
}
|
|
|
|
static int nct3018y_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
|
|
index 73848f764559b4..2b4921c23467d7 100644
|
|
--- a/drivers/rtc/rtc-pcf85063.c
|
|
+++ b/drivers/rtc/rtc-pcf85063.c
|
|
@@ -410,7 +410,7 @@ static long pcf85063_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
|
|
if (clkout_rates[i] <= rate)
|
|
return clkout_rates[i];
|
|
|
|
- return 0;
|
|
+ return clkout_rates[0];
|
|
}
|
|
|
|
static int pcf85063_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
|
|
index ea82b89d8929f9..61be91c25cf182 100644
|
|
--- a/drivers/rtc/rtc-pcf8563.c
|
|
+++ b/drivers/rtc/rtc-pcf8563.c
|
|
@@ -386,7 +386,7 @@ static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
|
|
if (clkout_rates[i] <= rate)
|
|
return clkout_rates[i];
|
|
|
|
- return 0;
|
|
+ return clkout_rates[0];
|
|
}
|
|
|
|
static int pcf8563_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
|
|
index 2f001c59c61d54..86b7f821e937b2 100644
|
|
--- a/drivers/rtc/rtc-rv3028.c
|
|
+++ b/drivers/rtc/rtc-rv3028.c
|
|
@@ -738,7 +738,7 @@ static long rv3028_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
|
|
if (clkout_rates[i] <= rate)
|
|
return clkout_rates[i];
|
|
|
|
- return 0;
|
|
+ return clkout_rates[0];
|
|
}
|
|
|
|
static int rv3028_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
|
|
index a982b9cf987017..49d7fe150684bc 100644
|
|
--- a/drivers/scsi/elx/efct/efct_lio.c
|
|
+++ b/drivers/scsi/elx/efct/efct_lio.c
|
|
@@ -382,7 +382,7 @@ efct_lio_sg_unmap(struct efct_io *io)
|
|
return;
|
|
|
|
dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
|
|
- ocp->seg_map_cnt, cmd->data_direction);
|
|
+ cmd->t_data_nents, cmd->data_direction);
|
|
ocp->seg_map_cnt = 0;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c
|
|
index 8a0e28aec928e4..0ecad398ed3db0 100644
|
|
--- a/drivers/scsi/ibmvscsi_tgt/libsrp.c
|
|
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c
|
|
@@ -184,7 +184,8 @@ static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
|
|
err = rdma_io(cmd, sg, nsg, md, 1, dir, len);
|
|
|
|
if (dma_map)
|
|
- dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
|
|
+ dma_unmap_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
|
|
+ DMA_BIDIRECTIONAL);
|
|
|
|
return err;
|
|
}
|
|
@@ -256,7 +257,8 @@ static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
|
|
err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);
|
|
|
|
if (dma_map)
|
|
- dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
|
|
+ dma_unmap_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
|
|
+ DMA_BIDIRECTIONAL);
|
|
|
|
free_mem:
|
|
if (token && dma_map) {
|
|
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
|
|
index 7162a5029b37a8..3841db665fe4bd 100644
|
|
--- a/drivers/scsi/isci/request.c
|
|
+++ b/drivers/scsi/isci/request.c
|
|
@@ -2907,7 +2907,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
|
|
task->total_xfer_len, task->data_dir);
|
|
else /* unmap the sgl dma addresses */
|
|
dma_unmap_sg(&ihost->pdev->dev, task->scatter,
|
|
- request->num_sg_entries, task->data_dir);
|
|
+ task->num_scatter, task->data_dir);
|
|
break;
|
|
case SAS_PROTOCOL_SMP: {
|
|
struct scatterlist *sg = &task->smp_task.smp_req;
|
|
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
index f270b0d829f6ea..0afa485fb300ca 100644
|
|
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
|
|
@@ -10818,8 +10818,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
|
|
break;
|
|
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
|
|
_scsih_pcie_topology_change_event(ioc, fw_event);
|
|
- ioc->current_event = NULL;
|
|
- return;
|
|
+ break;
|
|
}
|
|
out:
|
|
fw_event_work_put(fw_event);
|
|
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
|
|
index 1444b1f1c4c886..d6897432cf0f56 100644
|
|
--- a/drivers/scsi/mvsas/mv_sas.c
|
|
+++ b/drivers/scsi/mvsas/mv_sas.c
|
|
@@ -828,7 +828,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
|
|
dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
|
|
if (!sas_protocol_ata(task->task_proto))
|
|
if (n_elem)
|
|
- dma_unmap_sg(mvi->dev, task->scatter, n_elem,
|
|
+ dma_unmap_sg(mvi->dev, task->scatter, task->num_scatter,
|
|
task->data_dir);
|
|
prep_out:
|
|
return rc;
|
|
@@ -874,7 +874,7 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
|
|
if (!sas_protocol_ata(task->task_proto))
|
|
if (slot->n_elem)
|
|
dma_unmap_sg(mvi->dev, task->scatter,
|
|
- slot->n_elem, task->data_dir);
|
|
+ task->num_scatter, task->data_dir);
|
|
|
|
switch (task->task_proto) {
|
|
case SAS_PROTOCOL_SMP:
|
|
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
|
|
index f2c31e74d8ed06..22bec2453bac9c 100644
|
|
--- a/drivers/scsi/scsi_transport_iscsi.c
|
|
+++ b/drivers/scsi/scsi_transport_iscsi.c
|
|
@@ -2169,6 +2169,8 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
|
|
return 0;
|
|
|
|
iscsi_remove_conn(iscsi_dev_to_conn(dev));
|
|
+ iscsi_put_conn(iscsi_dev_to_conn(dev));
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
|
|
index fe694fec16b516..873c920eb0cf06 100644
|
|
--- a/drivers/scsi/sd.c
|
|
+++ b/drivers/scsi/sd.c
|
|
@@ -3895,7 +3895,9 @@ static void sd_shutdown(struct device *dev)
|
|
if ((system_state != SYSTEM_RESTART &&
|
|
sdkp->device->manage_system_start_stop) ||
|
|
(system_state == SYSTEM_POWER_OFF &&
|
|
- sdkp->device->manage_shutdown)) {
|
|
+ sdkp->device->manage_shutdown) ||
|
|
+ (system_state == SYSTEM_RUNNING &&
|
|
+ sdkp->device->manage_runtime_start_stop)) {
|
|
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
|
|
sd_start_stop_device(sdkp, 0);
|
|
}
|
|
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
|
|
index 7e50eeb4cc6f2f..2222ca4fa6e214 100644
|
|
--- a/drivers/soc/qcom/pmic_glink.c
|
|
+++ b/drivers/soc/qcom/pmic_glink.c
|
|
@@ -145,7 +145,10 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
|
|
return 0;
|
|
}
|
|
|
|
-static void pmic_glink_aux_release(struct device *dev) {}
|
|
+static void pmic_glink_aux_release(struct device *dev)
|
|
+{
|
|
+ of_node_put(dev->of_node);
|
|
+}
|
|
|
|
static int pmic_glink_add_aux_device(struct pmic_glink *pg,
|
|
struct auxiliary_device *aux,
|
|
@@ -159,8 +162,10 @@ static int pmic_glink_add_aux_device(struct pmic_glink *pg,
|
|
aux->dev.release = pmic_glink_aux_release;
|
|
device_set_of_node_from_dev(&aux->dev, parent);
|
|
ret = auxiliary_device_init(aux);
|
|
- if (ret)
|
|
+ if (ret) {
|
|
+ of_node_put(aux->dev.of_node);
|
|
return ret;
|
|
+ }
|
|
|
|
ret = auxiliary_device_add(aux);
|
|
if (ret)
|
|
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
|
|
index 5c7161b18b7240..645c4ee24f5b4f 100644
|
|
--- a/drivers/soc/qcom/qmi_encdec.c
|
|
+++ b/drivers/soc/qcom/qmi_encdec.c
|
|
@@ -304,6 +304,8 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
|
|
const void *buf_src;
|
|
int encode_tlv = 0;
|
|
int rc;
|
|
+ u8 val8;
|
|
+ u16 val16;
|
|
|
|
if (!ei_array)
|
|
return 0;
|
|
@@ -338,7 +340,6 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
|
|
break;
|
|
|
|
case QMI_DATA_LEN:
|
|
- memcpy(&data_len_value, buf_src, temp_ei->elem_size);
|
|
data_len_sz = temp_ei->elem_size == sizeof(u8) ?
|
|
sizeof(u8) : sizeof(u16);
|
|
/* Check to avoid out of range buffer access */
|
|
@@ -348,8 +349,17 @@ static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
|
|
__func__);
|
|
return -ETOOSMALL;
|
|
}
|
|
- rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
|
|
- 1, data_len_sz);
|
|
+ if (data_len_sz == sizeof(u8)) {
|
|
+ val8 = *(u8 *)buf_src;
|
|
+ data_len_value = (u32)val8;
|
|
+ rc = qmi_encode_basic_elem(buf_dst, &val8,
|
|
+ 1, data_len_sz);
|
|
+ } else {
|
|
+ val16 = *(u16 *)buf_src;
|
|
+ data_len_value = (u32)le16_to_cpu(val16);
|
|
+ rc = qmi_encode_basic_elem(buf_dst, &val16,
|
|
+ 1, data_len_sz);
|
|
+ }
|
|
UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
|
|
encoded_bytes, tlv_len,
|
|
encode_tlv, rc);
|
|
@@ -523,14 +533,23 @@ static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
|
|
u32 string_len = 0;
|
|
u32 string_len_sz = 0;
|
|
const struct qmi_elem_info *temp_ei = ei_array;
|
|
+ u8 val8;
|
|
+ u16 val16;
|
|
|
|
if (dec_level == 1) {
|
|
string_len = tlv_len;
|
|
} else {
|
|
string_len_sz = temp_ei->elem_len <= U8_MAX ?
|
|
sizeof(u8) : sizeof(u16);
|
|
- rc = qmi_decode_basic_elem(&string_len, buf_src,
|
|
- 1, string_len_sz);
|
|
+ if (string_len_sz == sizeof(u8)) {
|
|
+ rc = qmi_decode_basic_elem(&val8, buf_src,
|
|
+ 1, string_len_sz);
|
|
+ string_len = (u32)val8;
|
|
+ } else {
|
|
+ rc = qmi_decode_basic_elem(&val16, buf_src,
|
|
+ 1, string_len_sz);
|
|
+ string_len = (u32)val16;
|
|
+ }
|
|
decoded_bytes += rc;
|
|
}
|
|
|
|
@@ -604,6 +623,9 @@ static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
|
|
u32 decoded_bytes = 0;
|
|
const void *buf_src = in_buf;
|
|
int rc;
|
|
+ u8 val8;
|
|
+ u16 val16;
|
|
+ u32 val32;
|
|
|
|
while (decoded_bytes < in_buf_len) {
|
|
if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
|
|
@@ -642,9 +664,17 @@ static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
|
|
if (temp_ei->data_type == QMI_DATA_LEN) {
|
|
data_len_sz = temp_ei->elem_size == sizeof(u8) ?
|
|
sizeof(u8) : sizeof(u16);
|
|
- rc = qmi_decode_basic_elem(&data_len_value, buf_src,
|
|
- 1, data_len_sz);
|
|
- memcpy(buf_dst, &data_len_value, sizeof(u32));
|
|
+ if (data_len_sz == sizeof(u8)) {
|
|
+ rc = qmi_decode_basic_elem(&val8, buf_src,
|
|
+ 1, data_len_sz);
|
|
+ data_len_value = (u32)val8;
|
|
+ } else {
|
|
+ rc = qmi_decode_basic_elem(&val16, buf_src,
|
|
+ 1, data_len_sz);
|
|
+ data_len_value = (u32)val16;
|
|
+ }
|
|
+ val32 = cpu_to_le32(data_len_value);
|
|
+ memcpy(buf_dst, &val32, sizeof(u32));
|
|
temp_ei = temp_ei + 1;
|
|
buf_dst = out_c_struct + temp_ei->offset;
|
|
tlv_len -= data_len_sz;
|
|
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
|
|
index 5cf0e8c341644c..e8cc46874c7297 100644
|
|
--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
|
|
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
|
|
@@ -185,6 +185,8 @@ static void tegra234_cbb_error_clear(struct tegra_cbb *cbb)
|
|
{
|
|
struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
|
|
|
|
+ writel(0, priv->mon + FABRIC_MN_MASTER_ERR_FORCE_0);
|
|
+
|
|
writel(0x3f, priv->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
|
|
dsb(sy);
|
|
}
|
|
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
|
|
index 68d54887992d91..8ebfa44078e85f 100644
|
|
--- a/drivers/soundwire/stream.c
|
|
+++ b/drivers/soundwire/stream.c
|
|
@@ -1409,7 +1409,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
|
|
if (ret < 0) {
|
|
dev_err(bus->dev, "Prepare port(s) failed ret = %d\n",
|
|
ret);
|
|
- return ret;
|
|
+ goto restore_params;
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
|
|
index 40680b5fffc9ab..211d9c76665bc4 100644
|
|
--- a/drivers/spi/spi-stm32.c
|
|
+++ b/drivers/spi/spi-stm32.c
|
|
@@ -1808,9 +1808,15 @@ static int stm32_spi_probe(struct platform_device *pdev)
|
|
struct resource *res;
|
|
struct reset_control *rst;
|
|
struct device_node *np = pdev->dev.of_node;
|
|
+ const struct stm32_spi_cfg *cfg;
|
|
bool device_mode;
|
|
int ret;
|
|
- const struct stm32_spi_cfg *cfg = of_device_get_match_data(&pdev->dev);
|
|
+
|
|
+ cfg = of_device_get_match_data(&pdev->dev);
|
|
+ if (!cfg) {
|
|
+ dev_err(&pdev->dev, "Failed to get match data for platform\n");
|
|
+ return -ENODEV;
|
|
+ }
|
|
|
|
device_mode = of_property_read_bool(np, "spi-slave");
|
|
if (!cfg->has_device_mode && device_mode) {
|
|
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
|
|
index eac1d570f4372d..dce721f440c5ee 100644
|
|
--- a/drivers/staging/fbtft/fbtft-core.c
|
|
+++ b/drivers/staging/fbtft/fbtft-core.c
|
|
@@ -744,6 +744,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
|
|
return info;
|
|
|
|
release_framebuf:
|
|
+ fb_deferred_io_cleanup(info);
|
|
framebuffer_release(info);
|
|
|
|
alloc_fail:
|
|
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
|
|
index 9943b1fff1905d..573521e1703bb5 100644
|
|
--- a/drivers/staging/nvec/nvec_power.c
|
|
+++ b/drivers/staging/nvec/nvec_power.c
|
|
@@ -194,7 +194,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
|
|
break;
|
|
case MANUFACTURER:
|
|
memcpy(power->bat_manu, &res->plc, res->length - 2);
|
|
- power->bat_model[res->length - 2] = '\0';
|
|
+ power->bat_manu[res->length - 2] = '\0';
|
|
break;
|
|
case MODEL:
|
|
memcpy(power->bat_model, &res->plc, res->length - 2);
|
|
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
|
|
index da20bd3d46bc78..7dcdaac31546ba 100644
|
|
--- a/drivers/ufs/core/ufshcd.c
|
|
+++ b/drivers/ufs/core/ufshcd.c
|
|
@@ -4274,7 +4274,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
|
|
hba->uic_async_done = NULL;
|
|
if (reenable_intr)
|
|
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
|
|
- if (ret) {
|
|
+ if (ret && !hba->pm_op_in_progress) {
|
|
ufshcd_set_link_broken(hba);
|
|
ufshcd_schedule_eh_work(hba);
|
|
}
|
|
@@ -4282,6 +4282,14 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
|
|
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
|
mutex_unlock(&hba->uic_cmd_mutex);
|
|
|
|
+ /*
|
|
+ * If the h8 exit fails during the runtime resume process, it becomes
|
|
+ * stuck and cannot be recovered through the error handler. To fix
|
|
+ * this, use link recovery instead of the error handler.
|
|
+ */
|
|
+ if (ret && hba->pm_op_in_progress)
|
|
+ ret = ufshcd_link_recovery(hba);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
|
|
index 341408410ed934..41118bba91978d 100644
|
|
--- a/drivers/usb/early/xhci-dbc.c
|
|
+++ b/drivers/usb/early/xhci-dbc.c
|
|
@@ -681,6 +681,10 @@ int __init early_xdbc_setup_hardware(void)
|
|
|
|
xdbc.table_base = NULL;
|
|
xdbc.out_buf = NULL;
|
|
+
|
|
+ early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
|
|
+ xdbc.xhci_base = NULL;
|
|
+ xdbc.xhci_length = 0;
|
|
}
|
|
|
|
return ret;
|
|
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
|
|
index 4f326988be867c..22231ad96d6107 100644
|
|
--- a/drivers/usb/gadget/composite.c
|
|
+++ b/drivers/usb/gadget/composite.c
|
|
@@ -2489,6 +2489,11 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
|
|
if (!cdev->os_desc_req->buf) {
|
|
ret = -ENOMEM;
|
|
usb_ep_free_request(ep0, cdev->os_desc_req);
|
|
+ /*
|
|
+ * Set os_desc_req to NULL so that composite_dev_cleanup()
|
|
+ * will not try to free it again.
|
|
+ */
|
|
+ cdev->os_desc_req = NULL;
|
|
goto end;
|
|
}
|
|
cdev->os_desc_req->context = cdev;
|
|
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
|
|
index 749ba3596c2b3f..b350ee080236e2 100644
|
|
--- a/drivers/usb/host/xhci-plat.c
|
|
+++ b/drivers/usb/host/xhci-plat.c
|
|
@@ -149,7 +149,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
|
|
int ret;
|
|
int irq;
|
|
struct xhci_plat_priv *priv = NULL;
|
|
- bool of_match;
|
|
+ const struct of_device_id *of_match;
|
|
|
|
if (usb_disabled())
|
|
return -ENODEV;
|
|
diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
|
|
index ac8695195c13c8..8e852f4b8262e6 100644
|
|
--- a/drivers/usb/misc/apple-mfi-fastcharge.c
|
|
+++ b/drivers/usb/misc/apple-mfi-fastcharge.c
|
|
@@ -44,6 +44,7 @@ MODULE_DEVICE_TABLE(usb, mfi_fc_id_table);
|
|
struct mfi_device {
|
|
struct usb_device *udev;
|
|
struct power_supply *battery;
|
|
+ struct power_supply_desc battery_desc;
|
|
int charge_type;
|
|
};
|
|
|
|
@@ -178,6 +179,7 @@ static int mfi_fc_probe(struct usb_device *udev)
|
|
{
|
|
struct power_supply_config battery_cfg = {};
|
|
struct mfi_device *mfi = NULL;
|
|
+ char *battery_name;
|
|
int err;
|
|
|
|
if (!mfi_fc_match(udev))
|
|
@@ -187,23 +189,38 @@ static int mfi_fc_probe(struct usb_device *udev)
|
|
if (!mfi)
|
|
return -ENOMEM;
|
|
|
|
+ battery_name = kasprintf(GFP_KERNEL, "apple_mfi_fastcharge_%d-%d",
|
|
+ udev->bus->busnum, udev->devnum);
|
|
+ if (!battery_name) {
|
|
+ err = -ENOMEM;
|
|
+ goto err_free_mfi;
|
|
+ }
|
|
+
|
|
+ mfi->battery_desc = apple_mfi_fc_desc;
|
|
+ mfi->battery_desc.name = battery_name;
|
|
+
|
|
battery_cfg.drv_data = mfi;
|
|
|
|
mfi->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
|
|
mfi->battery = power_supply_register(&udev->dev,
|
|
- &apple_mfi_fc_desc,
|
|
+ &mfi->battery_desc,
|
|
&battery_cfg);
|
|
if (IS_ERR(mfi->battery)) {
|
|
dev_err(&udev->dev, "Can't register battery\n");
|
|
err = PTR_ERR(mfi->battery);
|
|
- kfree(mfi);
|
|
- return err;
|
|
+ goto err_free_name;
|
|
}
|
|
|
|
mfi->udev = usb_get_dev(udev);
|
|
dev_set_drvdata(&udev->dev, mfi);
|
|
|
|
return 0;
|
|
+
|
|
+err_free_name:
|
|
+ kfree(battery_name);
|
|
+err_free_mfi:
|
|
+ kfree(mfi);
|
|
+ return err;
|
|
}
|
|
|
|
static void mfi_fc_disconnect(struct usb_device *udev)
|
|
@@ -213,6 +230,7 @@ static void mfi_fc_disconnect(struct usb_device *udev)
|
|
mfi = dev_get_drvdata(&udev->dev);
|
|
if (mfi->battery)
|
|
power_supply_unregister(mfi->battery);
|
|
+ kfree(mfi->battery_desc.name);
|
|
dev_set_drvdata(&udev->dev, NULL);
|
|
usb_put_dev(mfi->udev);
|
|
kfree(mfi);
|
|
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
|
|
index 6c6387d39db82c..ac72b04c997bfb 100644
|
|
--- a/drivers/usb/serial/option.c
|
|
+++ b/drivers/usb/serial/option.c
|
|
@@ -2346,6 +2346,8 @@ static const struct usb_device_id option_ids[] = {
|
|
.driver_info = RSVD(3) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */
|
|
.driver_info = RSVD(5) | RSVD(6) },
|
|
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe15f, 0xff), /* Foxconn T99W709 */
|
|
+ .driver_info = RSVD(5) },
|
|
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff), /* Foxconn T99W640 MBIM */
|
|
.driver_info = RSVD(3) },
|
|
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
|
|
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
|
|
index 610a429c619125..54c3079031e165 100644
|
|
--- a/drivers/vfio/group.c
|
|
+++ b/drivers/vfio/group.c
|
|
@@ -194,11 +194,10 @@ static int vfio_df_group_open(struct vfio_device_file *df)
|
|
* implies they expected translation to exist
|
|
*/
|
|
if (!capable(CAP_SYS_RAWIO) ||
|
|
- vfio_iommufd_device_has_compat_ioas(device, df->iommufd))
|
|
+ vfio_iommufd_device_has_compat_ioas(device, df->iommufd)) {
|
|
ret = -EPERM;
|
|
- else
|
|
- ret = 0;
|
|
- goto out_put_kvm;
|
|
+ goto out_put_kvm;
|
|
+ }
|
|
}
|
|
|
|
ret = vfio_df_open(df);
|
|
diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c
|
|
index 82eba6966fa508..02852899c2aee4 100644
|
|
--- a/drivers/vfio/iommufd.c
|
|
+++ b/drivers/vfio/iommufd.c
|
|
@@ -25,6 +25,10 @@ int vfio_df_iommufd_bind(struct vfio_device_file *df)
|
|
|
|
lockdep_assert_held(&vdev->dev_set->lock);
|
|
|
|
+ /* Returns 0 to permit device opening under noiommu mode */
|
|
+ if (vfio_device_is_noiommu(vdev))
|
|
+ return 0;
|
|
+
|
|
return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
|
|
}
|
|
|
|
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
|
|
index a286ebcc711262..69d1bcefd7c32e 100644
|
|
--- a/drivers/vfio/pci/pds/vfio_dev.c
|
|
+++ b/drivers/vfio/pci/pds/vfio_dev.c
|
|
@@ -231,6 +231,7 @@ static const struct vfio_device_ops pds_vfio_ops = {
|
|
.bind_iommufd = vfio_iommufd_physical_bind,
|
|
.unbind_iommufd = vfio_iommufd_physical_unbind,
|
|
.attach_ioas = vfio_iommufd_physical_attach_ioas,
|
|
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
|
|
};
|
|
|
|
const struct vfio_device_ops *pds_vfio_ops_info(void)
|
|
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
|
|
index fa168b43423954..3f139360752e29 100644
|
|
--- a/drivers/vfio/pci/vfio_pci_core.c
|
|
+++ b/drivers/vfio/pci/vfio_pci_core.c
|
|
@@ -2235,7 +2235,7 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
|
|
return -EBUSY;
|
|
}
|
|
|
|
- if (pci_is_root_bus(pdev->bus)) {
|
|
+ if (pci_is_root_bus(pdev->bus) || pdev->is_virtfn) {
|
|
ret = vfio_assign_device_set(&vdev->vdev, vdev);
|
|
} else if (!pci_probe_reset_slot(pdev->slot)) {
|
|
ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
|
|
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
|
|
index 40732e8ed4c6fb..edb631e5e7ec99 100644
|
|
--- a/drivers/vfio/vfio_main.c
|
|
+++ b/drivers/vfio/vfio_main.c
|
|
@@ -537,7 +537,8 @@ void vfio_df_close(struct vfio_device_file *df)
|
|
|
|
lockdep_assert_held(&device->dev_set->lock);
|
|
|
|
- vfio_assert_device_open(device);
|
|
+ if (!vfio_assert_device_open(device))
|
|
+ return;
|
|
if (device->open_count == 1)
|
|
vfio_df_device_last_close(df);
|
|
device->open_count--;
|
|
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
|
|
index 66235151115740..90a3c0fc5ab0a6 100644
|
|
--- a/drivers/vhost/scsi.c
|
|
+++ b/drivers/vhost/scsi.c
|
|
@@ -1032,10 +1032,8 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
|
|
/* validated at handler entry */
|
|
vs_tpg = vhost_vq_get_backend(vq);
|
|
tpg = READ_ONCE(vs_tpg[*vc->target]);
|
|
- if (unlikely(!tpg)) {
|
|
- vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
|
|
+ if (unlikely(!tpg))
|
|
goto out;
|
|
- }
|
|
}
|
|
|
|
if (tpgp)
|
|
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
|
|
index 9d095fe03e18ba..ed68ba89b80b8f 100644
|
|
--- a/drivers/video/fbdev/core/fbcon.c
|
|
+++ b/drivers/video/fbdev/core/fbcon.c
|
|
@@ -935,13 +935,13 @@ static const char *fbcon_startup(void)
|
|
int rows, cols;
|
|
|
|
/*
|
|
- * If num_registered_fb is zero, this is a call for the dummy part.
|
|
+ * If fbcon_num_registered_fb is zero, this is a call for the dummy part.
|
|
* The frame buffer devices weren't initialized yet.
|
|
*/
|
|
if (!fbcon_num_registered_fb || info_idx == -1)
|
|
return display_desc;
|
|
/*
|
|
- * Instead of blindly using registered_fb[0], we use info_idx, set by
|
|
+ * Instead of blindly using fbcon_registered_fb[0], we use info_idx, set by
|
|
* fbcon_fb_registered();
|
|
*/
|
|
info = fbcon_registered_fb[info_idx];
|
|
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
|
|
index 7042a43b81d856..5f9e98423d6569 100644
|
|
--- a/drivers/video/fbdev/imxfb.c
|
|
+++ b/drivers/video/fbdev/imxfb.c
|
|
@@ -1004,8 +1004,13 @@ static int imxfb_probe(struct platform_device *pdev)
|
|
info->fix.smem_start = fbi->map_dma;
|
|
|
|
INIT_LIST_HEAD(&info->modelist);
|
|
- for (i = 0; i < fbi->num_modes; i++)
|
|
- fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
|
|
+ for (i = 0; i < fbi->num_modes; i++) {
|
|
+ ret = fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
|
|
+ if (ret) {
|
|
+ dev_err(&pdev->dev, "Failed to add videomode\n");
|
|
+ goto failed_cmap;
|
|
+ }
|
|
+ }
|
|
|
|
/*
|
|
* This makes sure that our colour bitfield
|
|
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
|
|
index 5ed33df68e9aba..e611d60316c683 100644
|
|
--- a/drivers/watchdog/ziirave_wdt.c
|
|
+++ b/drivers/watchdog/ziirave_wdt.c
|
|
@@ -302,6 +302,9 @@ static int ziirave_firm_verify(struct watchdog_device *wdd,
|
|
const u16 len = be16_to_cpu(rec->len);
|
|
const u32 addr = be32_to_cpu(rec->addr);
|
|
|
|
+ if (len > sizeof(data))
|
|
+ return -EINVAL;
|
|
+
|
|
if (ziirave_firm_addr_readonly(addr))
|
|
continue;
|
|
|
|
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
|
|
index 9c286b2a190016..ac8ce3179ba2e9 100644
|
|
--- a/drivers/xen/gntdev-common.h
|
|
+++ b/drivers/xen/gntdev-common.h
|
|
@@ -26,6 +26,10 @@ struct gntdev_priv {
|
|
/* lock protects maps and freeable_maps. */
|
|
struct mutex lock;
|
|
|
|
+ /* Free instances of struct gntdev_copy_batch. */
|
|
+ struct gntdev_copy_batch *batch;
|
|
+ struct mutex batch_lock;
|
|
+
|
|
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
|
|
/* Device for which DMA memory is allocated. */
|
|
struct device *dma_dev;
|
|
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
|
|
index 61faea1f066305..1f21607656182a 100644
|
|
--- a/drivers/xen/gntdev.c
|
|
+++ b/drivers/xen/gntdev.c
|
|
@@ -56,6 +56,18 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
|
|
"Gerd Hoffmann <kraxel@redhat.com>");
|
|
MODULE_DESCRIPTION("User-space granted page access driver");
|
|
|
|
+#define GNTDEV_COPY_BATCH 16
|
|
+
|
|
+struct gntdev_copy_batch {
|
|
+ struct gnttab_copy ops[GNTDEV_COPY_BATCH];
|
|
+ struct page *pages[GNTDEV_COPY_BATCH];
|
|
+ s16 __user *status[GNTDEV_COPY_BATCH];
|
|
+ unsigned int nr_ops;
|
|
+ unsigned int nr_pages;
|
|
+ bool writeable;
|
|
+ struct gntdev_copy_batch *next;
|
|
+};
|
|
+
|
|
static unsigned int limit = 64*1024;
|
|
module_param(limit, uint, 0644);
|
|
MODULE_PARM_DESC(limit,
|
|
@@ -584,6 +596,8 @@ static int gntdev_open(struct inode *inode, struct file *flip)
|
|
INIT_LIST_HEAD(&priv->maps);
|
|
mutex_init(&priv->lock);
|
|
|
|
+ mutex_init(&priv->batch_lock);
|
|
+
|
|
#ifdef CONFIG_XEN_GNTDEV_DMABUF
|
|
priv->dmabuf_priv = gntdev_dmabuf_init(flip);
|
|
if (IS_ERR(priv->dmabuf_priv)) {
|
|
@@ -608,6 +622,7 @@ static int gntdev_release(struct inode *inode, struct file *flip)
|
|
{
|
|
struct gntdev_priv *priv = flip->private_data;
|
|
struct gntdev_grant_map *map;
|
|
+ struct gntdev_copy_batch *batch;
|
|
|
|
pr_debug("priv %p\n", priv);
|
|
|
|
@@ -620,6 +635,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
|
|
}
|
|
mutex_unlock(&priv->lock);
|
|
|
|
+ mutex_lock(&priv->batch_lock);
|
|
+ while (priv->batch) {
|
|
+ batch = priv->batch;
|
|
+ priv->batch = batch->next;
|
|
+ kfree(batch);
|
|
+ }
|
|
+ mutex_unlock(&priv->batch_lock);
|
|
+
|
|
#ifdef CONFIG_XEN_GNTDEV_DMABUF
|
|
gntdev_dmabuf_fini(priv->dmabuf_priv);
|
|
#endif
|
|
@@ -785,17 +808,6 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
|
|
return rc;
|
|
}
|
|
|
|
-#define GNTDEV_COPY_BATCH 16
|
|
-
|
|
-struct gntdev_copy_batch {
|
|
- struct gnttab_copy ops[GNTDEV_COPY_BATCH];
|
|
- struct page *pages[GNTDEV_COPY_BATCH];
|
|
- s16 __user *status[GNTDEV_COPY_BATCH];
|
|
- unsigned int nr_ops;
|
|
- unsigned int nr_pages;
|
|
- bool writeable;
|
|
-};
|
|
-
|
|
static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
|
|
unsigned long *gfn)
|
|
{
|
|
@@ -953,36 +965,53 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
|
|
static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
|
|
{
|
|
struct ioctl_gntdev_grant_copy copy;
|
|
- struct gntdev_copy_batch batch;
|
|
+ struct gntdev_copy_batch *batch;
|
|
unsigned int i;
|
|
int ret = 0;
|
|
|
|
 if (copy_from_user(&copy, u, sizeof(copy)))
|
|
return -EFAULT;
|
|
|
|
- batch.nr_ops = 0;
|
|
- batch.nr_pages = 0;
|
|
+ mutex_lock(&priv->batch_lock);
|
|
+ if (!priv->batch) {
|
|
+ batch = kmalloc(sizeof(*batch), GFP_KERNEL);
|
|
+ } else {
|
|
+ batch = priv->batch;
|
|
+ priv->batch = batch->next;
|
|
+ }
|
|
+ mutex_unlock(&priv->batch_lock);
|
|
+ if (!batch)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ batch->nr_ops = 0;
|
|
+ batch->nr_pages = 0;
|
|
|
|
for (i = 0; i < copy.count; i++) {
|
|
struct gntdev_grant_copy_segment seg;
|
|
|
|
 if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
|
|
ret = -EFAULT;
|
|
+ gntdev_put_pages(batch);
|
|
goto out;
|
|
}
|
|
|
|
- ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
|
|
- if (ret < 0)
|
|
+ ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status);
|
|
+ if (ret < 0) {
|
|
+ gntdev_put_pages(batch);
|
|
goto out;
|
|
+ }
|
|
|
|
cond_resched();
|
|
}
|
|
- if (batch.nr_ops)
|
|
- ret = gntdev_copy(&batch);
|
|
- return ret;
|
|
+ if (batch->nr_ops)
|
|
+ ret = gntdev_copy(batch);
|
|
+
|
|
+ out:
|
|
+ mutex_lock(&priv->batch_lock);
|
|
+ batch->next = priv->batch;
|
|
+ priv->batch = batch;
|
|
+ mutex_unlock(&priv->batch_lock);
|
|
|
|
- out:
|
|
- gntdev_put_pages(&batch);
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
|
|
index a123bb26acd8bd..d37104aa847a74 100644
|
|
--- a/fs/f2fs/data.c
|
|
+++ b/fs/f2fs/data.c
|
|
@@ -288,7 +288,7 @@ static void f2fs_read_end_io(struct bio *bio)
|
|
{
|
|
struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
|
|
struct bio_post_read_ctx *ctx;
|
|
- bool intask = in_task();
|
|
+ bool intask = in_task() && !irqs_disabled();
|
|
|
|
iostat_update_and_unbind_ctx(bio);
|
|
ctx = bio->bi_private;
|
|
@@ -1552,8 +1552,11 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
|
|
end = pgofs + maxblocks;
|
|
|
|
next_dnode:
|
|
- if (map->m_may_create)
|
|
+ if (map->m_may_create) {
|
|
+ if (f2fs_lfs_mode(sbi))
|
|
+ f2fs_balance_fs(sbi, true);
|
|
f2fs_map_lock(sbi, flag);
|
|
+ }
|
|
|
|
/* When reading holes, we need its node page */
|
|
set_new_dnode(&dn, inode, NULL, NULL, 0);
|
|
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
|
|
index bfa2d89dc9ea3b..6a77581106a9e4 100644
|
|
--- a/fs/f2fs/extent_cache.c
|
|
+++ b/fs/f2fs/extent_cache.c
|
|
@@ -382,7 +382,7 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
|
|
struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
|
|
struct extent_tree *et;
|
|
struct extent_node *en;
|
|
- struct extent_info ei;
|
|
+ struct extent_info ei = {0};
|
|
|
|
if (!__may_extent_tree(inode, EX_READ)) {
|
|
/* drop largest read extent */
|
|
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
|
|
index 2d9a86129bd8de..7329f706da83c3 100644
|
|
--- a/fs/f2fs/f2fs.h
|
|
+++ b/fs/f2fs/f2fs.h
|
|
@@ -1245,7 +1245,7 @@ struct f2fs_bio_info {
|
|
#define RDEV(i) (raw_super->devs[i])
|
|
struct f2fs_dev_info {
|
|
struct block_device *bdev;
|
|
- char path[MAX_PATH_LEN];
|
|
+ char path[MAX_PATH_LEN + 1];
|
|
unsigned int total_segments;
|
|
block_t start_blk;
|
|
block_t end_blk;
|
|
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
|
|
index 66721c2093c023..76ec2899cbe892 100644
|
|
--- a/fs/f2fs/inode.c
|
|
+++ b/fs/f2fs/inode.c
|
|
@@ -905,6 +905,19 @@ void f2fs_evict_inode(struct inode *inode)
|
|
f2fs_update_inode_page(inode);
|
|
if (dquot_initialize_needed(inode))
|
|
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
|
|
+
|
|
+ /*
|
|
+ * If both f2fs_truncate() and f2fs_update_inode_page() failed
|
|
+ * due to fuzzed corrupted inode, call f2fs_inode_synced() to
|
|
+ * avoid triggering later f2fs_bug_on().
|
|
+ */
|
|
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
|
|
+ f2fs_warn(sbi,
|
|
+ "f2fs_evict_inode: inode is dirty, ino:%lu",
|
|
+ inode->i_ino);
|
|
+ f2fs_inode_synced(inode);
|
|
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
|
|
+ }
|
|
}
|
|
if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
|
|
sb_end_intwrite(inode->i_sb);
|
|
@@ -921,8 +934,12 @@ void f2fs_evict_inode(struct inode *inode)
|
|
if (likely(!f2fs_cp_error(sbi) &&
|
|
!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
|
|
f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
|
|
- else
|
|
- f2fs_inode_synced(inode);
|
|
+
|
|
+ /*
|
|
+ * anyway, it needs to remove the inode from sbi->inode_list[DIRTY_META]
|
|
+ * list to avoid UAF in f2fs_sync_inode_meta() during checkpoint.
|
|
+ */
|
|
+ f2fs_inode_synced(inode);
|
|
|
|
/* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
|
|
if (inode->i_ino)
|
|
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
|
|
index cd2ec6acc71776..a51a56bd0b61a4 100644
|
|
--- a/fs/f2fs/segment.h
|
|
+++ b/fs/f2fs/segment.h
|
|
@@ -606,8 +606,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
|
|
unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
|
|
unsigned int data_blocks = 0;
|
|
|
|
- if (f2fs_lfs_mode(sbi) &&
|
|
- unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
|
|
+ if (f2fs_lfs_mode(sbi)) {
|
|
total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA);
|
|
data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi);
|
|
data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi);
|
|
@@ -616,7 +615,7 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi,
|
|
if (lower_p)
|
|
*lower_p = node_secs + dent_secs + data_secs;
|
|
if (upper_p)
|
|
- *upper_p = node_secs + dent_secs +
|
|
+ *upper_p = node_secs + dent_secs + data_secs +
|
|
(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) +
|
|
(data_blocks ? 1 : 0);
|
|
if (curseg_p)
|
|
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
|
|
index 268ff47b039636..fd921e07e12b28 100644
|
|
--- a/fs/gfs2/util.c
|
|
+++ b/fs/gfs2/util.c
|
|
@@ -232,32 +232,23 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
|
|
*/
|
|
ret = gfs2_glock_nq(&sdp->sd_live_gh);
|
|
|
|
+ gfs2_glock_put(live_gl); /* drop extra reference we acquired */
|
|
+ clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
|
|
+
|
|
/*
|
|
* If we actually got the "live" lock in EX mode, there are no other
|
|
- * nodes available to replay our journal. So we try to replay it
|
|
- * ourselves. We hold the "live" glock to prevent other mounters
|
|
- * during recovery, then just dequeue it and reacquire it in our
|
|
- * normal SH mode. Just in case the problem that caused us to
|
|
- * withdraw prevents us from recovering our journal (e.g. io errors
|
|
- * and such) we still check if the journal is clean before proceeding
|
|
- * but we may wait forever until another mounter does the recovery.
|
|
+ * nodes available to replay our journal.
|
|
*/
|
|
if (ret == 0) {
|
|
- fs_warn(sdp, "No other mounters found. Trying to recover our "
|
|
- "own journal jid %d.\n", sdp->sd_lockstruct.ls_jid);
|
|
- if (gfs2_recover_journal(sdp->sd_jdesc, 1))
|
|
- fs_warn(sdp, "Unable to recover our journal jid %d.\n",
|
|
- sdp->sd_lockstruct.ls_jid);
|
|
- gfs2_glock_dq_wait(&sdp->sd_live_gh);
|
|
- gfs2_holder_reinit(LM_ST_SHARED,
|
|
- LM_FLAG_NOEXP | GL_EXACT | GL_NOPID,
|
|
- &sdp->sd_live_gh);
|
|
- gfs2_glock_nq(&sdp->sd_live_gh);
|
|
+ fs_warn(sdp, "No other mounters found.\n");
|
|
+ /*
|
|
+ * We are about to release the lockspace. By keeping live_gl
|
|
+ * locked here, we ensure that the next mounter coming along
|
|
+ * will be a "first" mounter which will perform recovery.
|
|
+ */
|
|
+ goto skip_recovery;
|
|
}
|
|
|
|
- gfs2_glock_put(live_gl); /* drop extra reference we acquired */
|
|
- clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
|
|
-
|
|
/*
|
|
* At this point our journal is evicted, so we need to get a new inode
|
|
* for it. Once done, we need to call gfs2_find_jhead which
|
|
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
|
|
index 61ed76d1039276..6a89f22d89677d 100644
|
|
--- a/fs/hfs/inode.c
|
|
+++ b/fs/hfs/inode.c
|
|
@@ -697,6 +697,7 @@ static const struct file_operations hfs_file_operations = {
|
|
.write_iter = generic_file_write_iter,
|
|
.mmap = generic_file_mmap,
|
|
.splice_read = filemap_splice_read,
|
|
+ .splice_write = iter_file_splice_write,
|
|
.fsync = hfs_file_fsync,
|
|
.open = hfs_file_open,
|
|
.release = hfs_file_release,
|
|
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
|
|
index 9c51867dddc51f..f6ac98b0d44ffd 100644
|
|
--- a/fs/hfsplus/extents.c
|
|
+++ b/fs/hfsplus/extents.c
|
|
@@ -342,9 +342,6 @@ static int hfsplus_free_extents(struct super_block *sb,
|
|
int i;
|
|
int err = 0;
|
|
|
|
- /* Mapping the allocation file may lock the extent tree */
|
|
- WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));
|
|
-
|
|
hfsplus_dump_extent(extent);
|
|
for (i = 0; i < 8; extent++, i++) {
|
|
count = be32_to_cpu(extent->block_count);
|
|
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
|
|
index c65c8c4b03dd38..73ff191ff2adc5 100644
|
|
--- a/fs/hfsplus/inode.c
|
|
+++ b/fs/hfsplus/inode.c
|
|
@@ -373,6 +373,7 @@ static const struct file_operations hfsplus_file_operations = {
|
|
.write_iter = generic_file_write_iter,
|
|
.mmap = generic_file_mmap,
|
|
.splice_read = filemap_splice_read,
|
|
+ .splice_write = iter_file_splice_write,
|
|
.fsync = hfsplus_file_fsync,
|
|
.open = hfsplus_file_open,
|
|
.release = hfsplus_file_release,
|
|
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
|
|
index 35e063c9f3a42e..5a877261c3fe48 100644
|
|
--- a/fs/jfs/jfs_dmap.c
|
|
+++ b/fs/jfs/jfs_dmap.c
|
|
@@ -1809,8 +1809,10 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
|
|
return -EIO;
|
|
dp = (struct dmap *) mp->data;
|
|
|
|
- if (dp->tree.budmin < 0)
|
|
+ if (dp->tree.budmin < 0) {
|
|
+ release_metapage(mp);
|
|
return -EIO;
|
|
+ }
|
|
|
|
/* try to allocate the blocks.
|
|
*/
|
|
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
|
|
index 38918638423596..385baf871800c4 100644
|
|
--- a/fs/nfs/dir.c
|
|
+++ b/fs/nfs/dir.c
|
|
@@ -1835,9 +1835,7 @@ static void block_revalidate(struct dentry *dentry)
|
|
|
|
static void unblock_revalidate(struct dentry *dentry)
|
|
{
|
|
- /* store_release ensures wait_var_event() sees the update */
|
|
- smp_store_release(&dentry->d_fsdata, NULL);
|
|
- wake_up_var(&dentry->d_fsdata);
|
|
+ store_release_wake_up(&dentry->d_fsdata, NULL);
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
|
|
index be686b8e0c5465..aeb17adcb2b646 100644
|
|
--- a/fs/nfs/export.c
|
|
+++ b/fs/nfs/export.c
|
|
@@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
|
|
{
|
|
struct nfs_fattr *fattr = NULL;
|
|
struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
|
|
- size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
|
|
+ size_t fh_size = offsetof(struct nfs_fh, data);
|
|
const struct nfs_rpc_ops *rpc_ops;
|
|
struct dentry *dentry;
|
|
struct inode *inode;
|
|
- int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
|
|
+ int len = EMBED_FH_OFF;
|
|
u32 *p = fid->raw;
|
|
int ret;
|
|
|
|
+ /* Initial check of bounds */
|
|
+ if (fh_len < len + XDR_QUADLEN(fh_size) ||
|
|
+ fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
|
|
+ return NULL;
|
|
+ /* Calculate embedded filehandle size */
|
|
+ fh_size += server_fh->size;
|
|
+ len += XDR_QUADLEN(fh_size);
|
|
/* NULL translates to ESTALE */
|
|
if (fh_len < len || fh_type != len)
|
|
return NULL;
|
|
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
index 0a26444fe20233..7354b6b1047833 100644
|
|
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
|
|
@@ -745,14 +745,14 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
|
|
{
|
|
struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
|
|
struct nfs4_ff_layout_mirror *mirror;
|
|
- struct nfs4_pnfs_ds *ds;
|
|
+ struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
|
|
u32 idx;
|
|
|
|
/* mirrors are initially sorted by efficiency */
|
|
for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
|
|
mirror = FF_LAYOUT_COMP(lseg, idx);
|
|
ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
|
|
- if (!ds)
|
|
+ if (IS_ERR(ds))
|
|
continue;
|
|
|
|
if (check_device &&
|
|
@@ -760,10 +760,10 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
|
|
continue;
|
|
|
|
*best_idx = idx;
|
|
- return ds;
|
|
+ break;
|
|
}
|
|
|
|
- return NULL;
|
|
+ return ds;
|
|
}
|
|
|
|
static struct nfs4_pnfs_ds *
|
|
@@ -933,7 +933,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
|
|
for (i = 0; i < pgio->pg_mirror_count; i++) {
|
|
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
|
|
ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
|
|
- if (!ds) {
|
|
+ if (IS_ERR(ds)) {
|
|
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
|
|
goto out_mds;
|
|
pnfs_generic_pg_cleanup(pgio);
|
|
@@ -1839,6 +1839,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
|
|
u32 idx = hdr->pgio_mirror_idx;
|
|
int vers;
|
|
struct nfs_fh *fh;
|
|
+ bool ds_fatal_error = false;
|
|
|
|
dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
|
|
__func__, hdr->inode->i_ino,
|
|
@@ -1846,8 +1847,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
|
|
|
|
mirror = FF_LAYOUT_COMP(lseg, idx);
|
|
ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
|
|
- if (!ds)
|
|
+ if (IS_ERR(ds)) {
|
|
+ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
|
|
goto out_failed;
|
|
+ }
|
|
|
|
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
|
|
hdr->inode);
|
|
@@ -1888,7 +1891,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
|
|
return PNFS_ATTEMPTED;
|
|
|
|
out_failed:
|
|
- if (ff_layout_avoid_mds_available_ds(lseg))
|
|
+ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
|
|
return PNFS_TRY_AGAIN;
|
|
trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
|
|
hdr->args.offset, hdr->args.count,
|
|
@@ -1909,11 +1912,14 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
|
|
int vers;
|
|
struct nfs_fh *fh;
|
|
u32 idx = hdr->pgio_mirror_idx;
|
|
+ bool ds_fatal_error = false;
|
|
|
|
mirror = FF_LAYOUT_COMP(lseg, idx);
|
|
ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
|
|
- if (!ds)
|
|
+ if (IS_ERR(ds)) {
|
|
+ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
|
|
goto out_failed;
|
|
+ }
|
|
|
|
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
|
|
hdr->inode);
|
|
@@ -1956,7 +1962,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
|
|
return PNFS_ATTEMPTED;
|
|
|
|
out_failed:
|
|
- if (ff_layout_avoid_mds_available_ds(lseg))
|
|
+ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
|
|
return PNFS_TRY_AGAIN;
|
|
trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
|
|
hdr->args.offset, hdr->args.count,
|
|
@@ -1998,7 +2004,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
|
|
idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
|
|
mirror = FF_LAYOUT_COMP(lseg, idx);
|
|
ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
|
|
- if (!ds)
|
|
+ if (IS_ERR(ds))
|
|
goto out_err;
|
|
|
|
ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
|
|
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
|
|
index d21c5ecfbf1cc3..95d5dca6714563 100644
|
|
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
|
|
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
|
|
@@ -370,11 +370,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
|
|
struct nfs4_ff_layout_mirror *mirror,
|
|
bool fail_return)
|
|
{
|
|
- struct nfs4_pnfs_ds *ds = NULL;
|
|
+ struct nfs4_pnfs_ds *ds;
|
|
struct inode *ino = lseg->pls_layout->plh_inode;
|
|
struct nfs_server *s = NFS_SERVER(ino);
|
|
unsigned int max_payload;
|
|
- int status;
|
|
+ int status = -EAGAIN;
|
|
|
|
if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
|
|
goto noconnect;
|
|
@@ -412,7 +412,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
|
|
ff_layout_send_layouterror(lseg);
|
|
if (fail_return || !ff_layout_has_available_ds(lseg))
|
|
pnfs_error_mark_layout_for_return(ino, lseg);
|
|
- ds = NULL;
|
|
+ ds = ERR_PTR(status);
|
|
out:
|
|
return ds;
|
|
}
|
|
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
|
|
index c29ad2e1d41635..8870c72416acbd 100644
|
|
--- a/fs/nfs/internal.h
|
|
+++ b/fs/nfs/internal.h
|
|
@@ -613,9 +613,12 @@ nfs_write_match_verf(const struct nfs_writeverf *verf,
|
|
|
|
static inline gfp_t nfs_io_gfp_mask(void)
|
|
{
|
|
- if (current->flags & PF_WQ_WORKER)
|
|
- return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
|
|
- return GFP_KERNEL;
|
|
+ gfp_t ret = current_gfp_context(GFP_KERNEL);
|
|
+
|
|
+ /* For workers __GFP_NORETRY only with __GFP_IO or __GFP_FS */
|
|
+ if ((current->flags & PF_WQ_WORKER) && ret == GFP_KERNEL)
|
|
+ ret |= __GFP_NORETRY | __GFP_NOWARN;
|
|
+ return ret;
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
|
|
index 3085a2faab2d34..89d88d37e0cc5c 100644
|
|
--- a/fs/nfs/nfs4proc.c
|
|
+++ b/fs/nfs/nfs4proc.c
|
|
@@ -10630,7 +10630,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
|
|
|
|
static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
|
|
{
|
|
- ssize_t error, error2, error3, error4;
|
|
+ ssize_t error, error2, error3, error4 = 0;
|
|
size_t left = size;
|
|
|
|
error = generic_listxattr(dentry, list, left);
|
|
@@ -10658,9 +10658,11 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
|
|
left -= error3;
|
|
}
|
|
|
|
- error4 = security_inode_listsecurity(d_inode(dentry), list, left);
|
|
- if (error4 < 0)
|
|
- return error4;
|
|
+ if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
|
|
+ error4 = security_inode_listsecurity(d_inode(dentry), list, left);
|
|
+ if (error4 < 0)
|
|
+ return error4;
|
|
+ }
|
|
|
|
error += error2 + error3 + error4;
|
|
if (size && error > size)
|
|
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
|
|
index 9dac7f6e72d2bd..723ff9cad9ed92 100644
|
|
--- a/fs/notify/fanotify/fanotify.c
|
|
+++ b/fs/notify/fanotify/fanotify.c
|
|
@@ -445,7 +445,13 @@ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
|
|
dwords = fh_len >> 2;
|
|
type = exportfs_encode_fid(inode, buf, &dwords);
|
|
err = -EINVAL;
|
|
- if (type <= 0 || type == FILEID_INVALID || fh_len != dwords << 2)
|
|
+ /*
|
|
+ * Unlike file_handle, type and len of struct fanotify_fh are u8.
|
|
+ * Traditionally, filesystem return handle_type < 0xff, but there
|
|
+ * is no enforecement for that in vfs.
|
|
+ */
|
|
+ BUILD_BUG_ON(MAX_HANDLE_SZ > 0xff || FILEID_INVALID > 0xff);
|
|
+ if (type <= 0 || type >= FILEID_INVALID || fh_len != dwords << 2)
|
|
goto out_err;
|
|
|
|
fh->type = type;
|
|
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
|
|
index 4aea458216117f..a7fe2e02c32ee6 100644
|
|
--- a/fs/ntfs3/file.c
|
|
+++ b/fs/ntfs3/file.c
|
|
@@ -299,7 +299,10 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
|
}
|
|
|
|
if (ni->i_valid < to) {
|
|
- inode_lock(inode);
|
|
+ if (!inode_trylock(inode)) {
|
|
+ err = -EAGAIN;
|
|
+ goto out;
|
|
+ }
|
|
err = ntfs_extend_initialized_size(file, ni,
|
|
ni->i_valid, to);
|
|
inode_unlock(inode);
|
|
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
|
|
index 3c876c468c2c47..5c45fec832b00b 100644
|
|
--- a/fs/ntfs3/frecord.c
|
|
+++ b/fs/ntfs3/frecord.c
|
|
@@ -3058,8 +3058,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
|
|
* ni_rename - Remove one name and insert new name.
|
|
*/
|
|
int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
|
|
- struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de,
|
|
- bool *is_bad)
|
|
+ struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de)
|
|
{
|
|
int err;
|
|
struct NTFS_DE *de2 = NULL;
|
|
@@ -3082,8 +3081,8 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
|
|
err = ni_add_name(new_dir_ni, ni, new_de);
|
|
if (!err) {
|
|
err = ni_remove_name(dir_ni, ni, de, &de2, &undo);
|
|
- if (err && ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo))
|
|
- *is_bad = true;
|
|
+ WARN_ON(err && ni_remove_name(new_dir_ni, ni, new_de, &de2,
|
|
+ &undo));
|
|
}
|
|
|
|
/*
|
|
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
|
|
index 61c4da8e6c3de9..f5901c23ab9379 100644
|
|
--- a/fs/ntfs3/namei.c
|
|
+++ b/fs/ntfs3/namei.c
|
|
@@ -261,7 +261,7 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
|
|
struct ntfs_inode *ni = ntfs_i(inode);
|
|
struct inode *new_inode = d_inode(new_dentry);
|
|
struct NTFS_DE *de, *new_de;
|
|
- bool is_same, is_bad;
|
|
+ bool is_same;
|
|
/*
|
|
* de - memory of PATH_MAX bytes:
|
|
* [0-1024) - original name (dentry->d_name)
|
|
@@ -330,12 +330,8 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
|
|
if (dir_ni != new_dir_ni)
|
|
ni_lock_dir2(new_dir_ni);
|
|
|
|
- is_bad = false;
|
|
- err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
|
|
- if (is_bad) {
|
|
- /* Restore after failed rename failed too. */
|
|
- _ntfs_bad_inode(inode);
|
|
- } else if (!err) {
|
|
+ err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de);
|
|
+ if (!err) {
|
|
simple_rename_timestamp(dir, dentry, new_dir, new_dentry);
|
|
mark_inode_dirty(inode);
|
|
mark_inode_dirty(dir);
|
|
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
|
|
index c98e6868bfbadb..72810d8f62ee9d 100644
|
|
--- a/fs/ntfs3/ntfs_fs.h
|
|
+++ b/fs/ntfs3/ntfs_fs.h
|
|
@@ -577,8 +577,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
|
|
struct NTFS_DE *de);
|
|
|
|
int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
|
|
- struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de,
|
|
- bool *is_bad);
|
|
+ struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de);
|
|
|
|
bool ni_is_dirty(struct inode *inode);
|
|
|
|
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
|
|
index fa41db08848802..b57140ebfad0f7 100644
|
|
--- a/fs/orangefs/orangefs-debugfs.c
|
|
+++ b/fs/orangefs/orangefs-debugfs.c
|
|
@@ -728,8 +728,8 @@ static void do_k_string(void *k_mask, int index)
|
|
|
|
if (*mask & s_kmod_keyword_mask_map[index].mask_val) {
|
|
if ((strlen(kernel_debug_string) +
|
|
- strlen(s_kmod_keyword_mask_map[index].keyword))
|
|
- < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) {
|
|
+ strlen(s_kmod_keyword_mask_map[index].keyword) + 1)
|
|
+ < ORANGEFS_MAX_DEBUG_STRING_LEN) {
|
|
strcat(kernel_debug_string,
|
|
s_kmod_keyword_mask_map[index].keyword);
|
|
strcat(kernel_debug_string, ",");
|
|
@@ -756,7 +756,7 @@ static void do_c_string(void *c_mask, int index)
|
|
(mask->mask2 & cdm_array[index].mask2)) {
|
|
if ((strlen(client_debug_string) +
|
|
strlen(cdm_array[index].keyword) + 1)
|
|
- < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) {
|
|
+ < ORANGEFS_MAX_DEBUG_STRING_LEN) {
|
|
strcat(client_debug_string,
|
|
cdm_array[index].keyword);
|
|
strcat(client_debug_string, ",");
|
|
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
|
|
index c8785d68e870bf..2187d9ca351ced 100644
|
|
--- a/fs/proc/generic.c
|
|
+++ b/fs/proc/generic.c
|
|
@@ -567,6 +567,8 @@ static void pde_set_flags(struct proc_dir_entry *pde)
|
|
if (pde->proc_ops->proc_compat_ioctl)
|
|
pde->flags |= PROC_ENTRY_proc_compat_ioctl;
|
|
#endif
|
|
+ if (pde->proc_ops->proc_lseek)
|
|
+ pde->flags |= PROC_ENTRY_proc_lseek;
|
|
}
|
|
|
|
struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
|
|
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
|
|
index 4b3ae7e0def32e..92772702d3695c 100644
|
|
--- a/fs/proc/inode.c
|
|
+++ b/fs/proc/inode.c
|
|
@@ -494,7 +494,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
|
|
typeof_member(struct proc_ops, proc_release) release;
|
|
struct pde_opener *pdeo;
|
|
|
|
- if (!pde->proc_ops->proc_lseek)
|
|
+ if (!pde_has_proc_lseek(pde))
|
|
file->f_mode &= ~FMODE_LSEEK;
|
|
|
|
if (pde_is_permanent(pde)) {
|
|
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
|
|
index 445c74a39a931f..fe378136012076 100644
|
|
--- a/fs/proc/internal.h
|
|
+++ b/fs/proc/internal.h
|
|
@@ -98,6 +98,11 @@ static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde)
|
|
#endif
|
|
}
|
|
|
|
+static inline bool pde_has_proc_lseek(const struct proc_dir_entry *pde)
|
|
+{
|
|
+ return pde->flags & PROC_ENTRY_proc_lseek;
|
|
+}
|
|
+
|
|
extern struct kmem_cache *proc_dir_entry_cache;
|
|
void pde_free(struct proc_dir_entry *pde);
|
|
|
|
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
|
|
index 4a20e92474b234..50ad8246ed187c 100644
|
|
--- a/fs/smb/client/cifs_debug.c
|
|
+++ b/fs/smb/client/cifs_debug.c
|
|
@@ -384,7 +384,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
|
|
seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
|
|
"transport status: %x",
|
|
server->smbd_conn->protocol,
|
|
- server->smbd_conn->transport_status);
|
|
+ server->smbd_conn->socket.status);
|
|
seq_printf(m, "\nConn receive_credit_max: %x "
|
|
"send_credit_target: %x max_send_size: %x",
|
|
server->smbd_conn->receive_credit_max,
|
|
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
|
|
index bf32bc22ebd69f..7bd29e827c8f1b 100644
|
|
--- a/fs/smb/client/cifsacl.c
|
|
+++ b/fs/smb/client/cifsacl.c
|
|
@@ -187,7 +187,7 @@ compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
|
|
/* compare all of the subauth values if any */
|
|
num_sat = ctsid->num_subauth;
|
|
num_saw = cwsid->num_subauth;
|
|
- num_subauth = num_sat < num_saw ? num_sat : num_saw;
|
|
+ num_subauth = min(num_sat, num_saw);
|
|
if (num_subauth) {
|
|
for (i = 0; i < num_subauth; ++i) {
|
|
if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
|
|
diff --git a/fs/smb/client/cifsacl.h b/fs/smb/client/cifsacl.h
|
|
index cbaed8038e3654..05b3650ba0aec8 100644
|
|
--- a/fs/smb/client/cifsacl.h
|
|
+++ b/fs/smb/client/cifsacl.h
|
|
@@ -144,7 +144,7 @@ struct smb3_sd {
|
|
#define ACL_CONTROL_SI 0x0800 /* SACL Auto-Inherited */
|
|
#define ACL_CONTROL_DI 0x0400 /* DACL Auto-Inherited */
|
|
#define ACL_CONTROL_SC 0x0200 /* SACL computed through inheritance */
|
|
-#define ACL_CONTROL_DC 0x0100 /* DACL computed through inheritence */
|
|
+#define ACL_CONTROL_DC 0x0100 /* DACL computed through inheritance */
|
|
#define ACL_CONTROL_SS 0x0080 /* Create server ACL */
|
|
#define ACL_CONTROL_DT 0x0040 /* DACL provided by trusted source */
|
|
#define ACL_CONTROL_SD 0x0020 /* SACL defaulted */
|
|
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
|
|
index b0473c2567fe68..da3d003cb43db8 100644
|
|
--- a/fs/smb/client/cifsencrypt.c
|
|
+++ b/fs/smb/client/cifsencrypt.c
|
|
@@ -353,7 +353,7 @@ int cifs_verify_signature(struct smb_rqst *rqst,
|
|
cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n",
|
|
cifs_pdu->Command);
|
|
|
|
- /* save off the origiginal signature so we can modify the smb and check
|
|
+ /* save off the original signature so we can modify the smb and check
|
|
its signature against what the server sent */
|
|
memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
|
|
|
|
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
|
|
index bbb0ef18d7b8c8..a1ab95f382d566 100644
|
|
--- a/fs/smb/client/cifsfs.c
|
|
+++ b/fs/smb/client/cifsfs.c
|
|
@@ -161,7 +161,7 @@ __u32 cifs_lock_secret;
|
|
|
|
/*
|
|
* Bumps refcount for cifs super block.
|
|
- * Note that it should be only called if a referece to VFS super block is
|
|
+ * Note that it should be only called if a reference to VFS super block is
|
|
* already held, e.g. in open-type syscalls context. Otherwise it can race with
|
|
* atomic_dec_and_test in deactivate_locked_super.
|
|
*/
|
|
@@ -289,7 +289,7 @@ static void cifs_kill_sb(struct super_block *sb)
|
|
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
|
|
|
|
/*
|
|
- * We ned to release all dentries for the cached directories
|
|
+ * We need to release all dentries for the cached directories
|
|
* before we kill the sb.
|
|
*/
|
|
if (cifs_sb->root) {
|
|
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
|
|
index c9b37f2ebde853..4bafb1adfb2235 100644
|
|
--- a/fs/smb/client/cifsglob.h
|
|
+++ b/fs/smb/client/cifsglob.h
|
|
@@ -785,7 +785,7 @@ struct TCP_Server_Info {
|
|
} compression;
|
|
__u16 signing_algorithm;
|
|
__le16 cipher_type;
|
|
- /* save initital negprot hash */
|
|
+ /* save initial negprot hash */
|
|
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
|
|
bool signing_negotiated; /* true if valid signing context rcvd from server */
|
|
bool posix_ext_supported;
|
|
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
|
|
index 763178b7745424..f4cfb082dfd146 100644
|
|
--- a/fs/smb/client/cifspdu.h
|
|
+++ b/fs/smb/client/cifspdu.h
|
|
@@ -781,7 +781,7 @@ typedef struct smb_com_logoff_andx_rsp {
|
|
__u16 ByteCount;
|
|
} __attribute__((packed)) LOGOFF_ANDX_RSP;
|
|
|
|
-typedef union smb_com_tree_disconnect { /* as an altetnative can use flag on
|
|
+typedef union smb_com_tree_disconnect { /* as an alternative can use flag on
|
|
tree_connect PDU to effect disconnect */
|
|
/* tdis is probably simplest SMB PDU */
|
|
struct {
|
|
@@ -2405,7 +2405,7 @@ struct cifs_posix_ace { /* access control entry (ACE) */
|
|
__le64 cifs_uid; /* or gid */
|
|
} __attribute__((packed));
|
|
|
|
-struct cifs_posix_acl { /* access conrol list (ACL) */
|
|
+struct cifs_posix_acl { /* access control list (ACL) */
|
|
__le16 version;
|
|
__le16 access_entry_count; /* access ACL - count of entries */
|
|
__le16 default_entry_count; /* default ACL - count of entries */
|
|
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
|
|
index db35e68e8a5830..81d425f571e28d 100644
|
|
--- a/fs/smb/client/cifssmb.c
|
|
+++ b/fs/smb/client/cifssmb.c
|
|
@@ -1214,7 +1214,7 @@ CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
|
|
req->CreateDisposition = cpu_to_le32(disposition);
|
|
req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
|
|
|
|
- /* BB Expirement with various impersonation levels and verify */
|
|
+ /* BB Experiment with various impersonation levels and verify */
|
|
req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
|
|
req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY;
|
|
|
|
@@ -2993,7 +2993,7 @@ static void cifs_init_ace(struct cifs_posix_ace *cifs_ace,
|
|
|
|
/**
|
|
* posix_acl_to_cifs - convert ACLs from POSIX ACL to cifs format
|
|
- * @parm_data: ACLs in cifs format to conver to
|
|
+ * @parm_data: ACLs in cifs format to convert to
|
|
* @acl: ACLs in POSIX ACL format to convert from
|
|
* @acl_type: the type of POSIX ACLs stored in @acl
|
|
*
|
|
@@ -3970,7 +3970,7 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
|
|
name_len =
|
|
cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
|
|
PATH_MAX, nls_codepage, remap);
|
|
- /* We can not add the asterik earlier in case
|
|
+ /* We can not add the asterisk earlier in case
|
|
it got remapped to 0xF03A as if it were part of the
|
|
directory name instead of a wildcard */
|
|
name_len *= 2;
|
|
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
|
|
index 99a8c6fbd41a65..7a2b81fbd9cfd2 100644
|
|
--- a/fs/smb/client/file.c
|
|
+++ b/fs/smb/client/file.c
|
|
@@ -2421,7 +2421,7 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
|
|
}
|
|
}
|
|
}
|
|
- /* couldn't find useable FH with same pid, try any available */
|
|
+ /* couldn't find usable FH with same pid, try any available */
|
|
if (!any_available) {
|
|
any_available = true;
|
|
goto refind_writable;
|
|
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
|
|
index 52ee72e562f5f6..90ebff5d0199d9 100644
|
|
--- a/fs/smb/client/fs_context.h
|
|
+++ b/fs/smb/client/fs_context.h
|
|
@@ -263,7 +263,7 @@ struct smb3_fs_context {
|
|
unsigned int min_offload;
|
|
unsigned int retrans;
|
|
bool sockopt_tcp_nodelay:1;
|
|
- /* attribute cache timemout for files and directories in jiffies */
|
|
+ /* attribute cache timeout for files and directories in jiffies */
|
|
unsigned long acregmax;
|
|
unsigned long acdirmax;
|
|
/* timeout for deferred close of files in jiffies */
|
|
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
|
|
index bbbe48447765de..ad77952f6d810b 100644
|
|
--- a/fs/smb/client/misc.c
|
|
+++ b/fs/smb/client/misc.c
|
|
@@ -260,7 +260,7 @@ free_rsp_buf(int resp_buftype, void *rsp)
|
|
}
|
|
|
|
/* NB: MID can not be set if treeCon not passed in, in that
|
|
- case it is responsbility of caller to set the mid */
|
|
+ case it is responsibility of caller to set the mid */
|
|
void
|
|
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
|
|
const struct cifs_tcon *treeCon, int word_count
|
|
diff --git a/fs/smb/client/netmisc.c b/fs/smb/client/netmisc.c
|
|
index 1b52e6ac431cb0..2a8d71221e5e71 100644
|
|
--- a/fs/smb/client/netmisc.c
|
|
+++ b/fs/smb/client/netmisc.c
|
|
@@ -1003,7 +1003,7 @@ struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
|
|
year is 2**7, the last year is 1980+127, which means we need only
|
|
consider 2 special case years, ie the years 2000 and 2100, and only
|
|
adjust for the lack of leap year for the year 2100, as 2000 was a
|
|
- leap year (divisable by 400) */
|
|
+ leap year (divisible by 400) */
|
|
if (year >= 120) /* the year 2100 */
|
|
days = days - 1; /* do not count leap year for the year 2100 */
|
|
|
|
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
|
|
index 0be16f8acd9af5..5febf8afaab042 100644
|
|
--- a/fs/smb/client/readdir.c
|
|
+++ b/fs/smb/client/readdir.c
|
|
@@ -567,7 +567,7 @@ static void cifs_fill_dirent_std(struct cifs_dirent *de,
|
|
const FIND_FILE_STANDARD_INFO *info)
|
|
{
|
|
de->name = &info->FileName[0];
|
|
- /* one byte length, no endianess conversion */
|
|
+ /* one byte length, no endianness conversion */
|
|
de->namelen = info->FileNameLength;
|
|
de->resume_key = info->ResumeKey;
|
|
}
|
|
@@ -832,7 +832,7 @@ static bool emit_cached_dirents(struct cached_dirents *cde,
|
|
* However, this sequence of ->pos values may have holes
|
|
* in it, for example dot-dirs returned from the server
|
|
* are suppressed.
|
|
- * Handle this bu forcing ctx->pos to be the same as the
|
|
+ * Handle this by forcing ctx->pos to be the same as the
|
|
* ->pos of the current dirent we emit from the cache.
|
|
* This means that when we emit these entries from the cache
|
|
* we now emit them with the same ->pos value as in the
|
|
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
|
|
index 2385e570e3311a..d0734aa1961a3e 100644
|
|
--- a/fs/smb/client/smb2ops.c
|
|
+++ b/fs/smb/client/smb2ops.c
|
|
@@ -2132,7 +2132,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
|
|
NULL, 0 /* no input data */, max_response_size,
|
|
(char **)&retbuf,
|
|
&ret_data_len);
|
|
- cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
|
|
+ cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
|
|
rc, ret_data_len);
|
|
if (rc)
|
|
return rc;
|
|
@@ -3540,7 +3540,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
|
|
/*
|
|
* At this point, we are trying to fallocate an internal
|
|
* regions of a sparse file. Since smb2 does not have a
|
|
- * fallocate command we have two otions on how to emulate this.
|
|
+ * fallocate command we have two options on how to emulate this.
|
|
* We can either turn the entire file to become non-sparse
|
|
* which we only do if the fallocate is for virtually
|
|
* the whole file, or we can overwrite the region with zeroes
|
|
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
|
|
index 357abb0170c495..e58cad5d735a22 100644
|
|
--- a/fs/smb/client/smb2pdu.c
|
|
+++ b/fs/smb/client/smb2pdu.c
|
|
@@ -2989,7 +2989,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
|
|
|
|
SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
|
|
|
|
- /* Eventually save off posix specific response info and timestaps */
|
|
+ /* Eventually save off posix specific response info and timestamps */
|
|
|
|
err_free_rsp_buf:
|
|
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
|
|
@@ -4574,7 +4574,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
|
}
|
|
#ifdef CONFIG_CIFS_SMB_DIRECT
|
|
/*
|
|
- * If this rdata has a memmory registered, the MR can be freed
|
|
+ * If this rdata has a memory registered, the MR can be freed
|
|
* MR needs to be freed as soon as I/O finishes to prevent deadlock
|
|
* because they have limited number and are used for future I/Os
|
|
*/
|
|
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
|
|
index 4a43802375b3a3..99081e9d6283a2 100644
|
|
--- a/fs/smb/client/smb2transport.c
|
|
+++ b/fs/smb/client/smb2transport.c
|
|
@@ -720,7 +720,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
|
|
shdr->Command);
|
|
|
|
/*
|
|
- * Save off the origiginal signature so we can modify the smb and check
|
|
+ * Save off the original signature so we can modify the smb and check
|
|
* our calculated signature against what the server sent.
|
|
*/
|
|
memcpy(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE);
|
|
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
|
|
index d74e829de51c22..48d020e1f663b5 100644
|
|
--- a/fs/smb/client/smbdirect.c
|
|
+++ b/fs/smb/client/smbdirect.c
|
|
@@ -164,10 +164,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
|
|
{
|
|
struct smbd_connection *info =
|
|
container_of(work, struct smbd_connection, disconnect_work);
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
|
|
- if (info->transport_status == SMBD_CONNECTED) {
|
|
- info->transport_status = SMBD_DISCONNECTING;
|
|
- rdma_disconnect(info->id);
|
|
+ if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
|
|
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
|
|
+ rdma_disconnect(sc->rdma.cm_id);
|
|
}
|
|
}
|
|
|
|
@@ -181,6 +182,7 @@ static int smbd_conn_upcall(
|
|
struct rdma_cm_id *id, struct rdma_cm_event *event)
|
|
{
|
|
struct smbd_connection *info = id->context;
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
|
|
log_rdma_event(INFO, "event=%d status=%d\n",
|
|
event->event, event->status);
|
|
@@ -204,7 +206,7 @@ static int smbd_conn_upcall(
|
|
|
|
case RDMA_CM_EVENT_ESTABLISHED:
|
|
log_rdma_event(INFO, "connected event=%d\n", event->event);
|
|
- info->transport_status = SMBD_CONNECTED;
|
|
+ sc->status = SMBDIRECT_SOCKET_CONNECTED;
|
|
wake_up_interruptible(&info->conn_wait);
|
|
break;
|
|
|
|
@@ -212,20 +214,20 @@ static int smbd_conn_upcall(
|
|
case RDMA_CM_EVENT_UNREACHABLE:
|
|
case RDMA_CM_EVENT_REJECTED:
|
|
log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
|
|
- info->transport_status = SMBD_DISCONNECTED;
|
|
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
|
|
wake_up_interruptible(&info->conn_wait);
|
|
break;
|
|
|
|
case RDMA_CM_EVENT_DEVICE_REMOVAL:
|
|
case RDMA_CM_EVENT_DISCONNECTED:
|
|
- /* This happenes when we fail the negotiation */
|
|
- if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
|
|
- info->transport_status = SMBD_DISCONNECTED;
|
|
+ /* This happens when we fail the negotiation */
|
|
+ if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
|
|
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
|
|
wake_up(&info->conn_wait);
|
|
break;
|
|
}
|
|
|
|
- info->transport_status = SMBD_DISCONNECTED;
|
|
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
|
|
wake_up_interruptible(&info->disconn_wait);
|
|
wake_up_interruptible(&info->wait_reassembly_queue);
|
|
wake_up_interruptible_all(&info->wait_send_queue);
|
|
@@ -274,6 +276,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
int i;
|
|
struct smbd_request *request =
|
|
container_of(wc->wr_cqe, struct smbd_request, cqe);
|
|
+ struct smbd_connection *info = request->info;
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
|
|
log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
|
|
request, wc->status);
|
|
@@ -285,7 +289,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
}
|
|
|
|
for (i = 0; i < request->num_sge; i++)
|
|
- ib_dma_unmap_single(request->info->id->device,
|
|
+ ib_dma_unmap_single(sc->ib.dev,
|
|
request->sge[i].addr,
|
|
request->sge[i].length,
|
|
DMA_TO_DEVICE);
|
|
@@ -392,8 +396,9 @@ static void smbd_post_send_credits(struct work_struct *work)
|
|
struct smbd_connection *info =
|
|
container_of(work, struct smbd_connection,
|
|
post_send_credits_work);
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
|
|
- if (info->transport_status != SMBD_CONNECTED) {
|
|
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
|
|
wake_up(&info->wait_receive_queues);
|
|
return;
|
|
}
|
|
@@ -460,7 +465,6 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
|
|
log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
|
|
wc->status, wc->opcode);
|
|
- smbd_disconnect_rdma_connection(info);
|
|
goto error;
|
|
}
|
|
|
|
@@ -477,8 +481,9 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
info->full_packet_received = true;
|
|
info->negotiate_done =
|
|
process_negotiation_response(response, wc->byte_len);
|
|
+ put_receive_buffer(info, response);
|
|
complete(&info->negotiate_completion);
|
|
- break;
|
|
+ return;
|
|
|
|
/* SMBD data transfer packet */
|
|
case SMBD_TRANSFER_DATA:
|
|
@@ -535,14 +540,16 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
}
|
|
|
|
return;
|
|
-
|
|
- default:
|
|
- log_rdma_recv(ERR,
|
|
- "unexpected response type=%d\n", response->type);
|
|
}
|
|
|
|
+ /*
|
|
+ * This is an internal error!
|
|
+ */
|
|
+ log_rdma_recv(ERR, "unexpected response type=%d\n", response->type);
|
|
+ WARN_ON_ONCE(response->type != SMBD_TRANSFER_DATA);
|
|
error:
|
|
put_receive_buffer(info, response);
|
|
+ smbd_disconnect_rdma_connection(info);
|
|
}
|
|
|
|
static struct rdma_cm_id *smbd_create_id(
|
|
@@ -634,32 +641,34 @@ static int smbd_ia_open(
|
|
struct smbd_connection *info,
|
|
struct sockaddr *dstaddr, int port)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
int rc;
|
|
|
|
- info->id = smbd_create_id(info, dstaddr, port);
|
|
- if (IS_ERR(info->id)) {
|
|
- rc = PTR_ERR(info->id);
|
|
+ sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
|
|
+ if (IS_ERR(sc->rdma.cm_id)) {
|
|
+ rc = PTR_ERR(sc->rdma.cm_id);
|
|
goto out1;
|
|
}
|
|
+ sc->ib.dev = sc->rdma.cm_id->device;
|
|
|
|
- if (!frwr_is_supported(&info->id->device->attrs)) {
|
|
+ if (!frwr_is_supported(&sc->ib.dev->attrs)) {
|
|
log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
|
|
log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
|
|
- info->id->device->attrs.device_cap_flags,
|
|
- info->id->device->attrs.max_fast_reg_page_list_len);
|
|
+ sc->ib.dev->attrs.device_cap_flags,
|
|
+ sc->ib.dev->attrs.max_fast_reg_page_list_len);
|
|
rc = -EPROTONOSUPPORT;
|
|
goto out2;
|
|
}
|
|
info->max_frmr_depth = min_t(int,
|
|
smbd_max_frmr_depth,
|
|
- info->id->device->attrs.max_fast_reg_page_list_len);
|
|
+ sc->ib.dev->attrs.max_fast_reg_page_list_len);
|
|
info->mr_type = IB_MR_TYPE_MEM_REG;
|
|
- if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
|
|
+ if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
|
|
info->mr_type = IB_MR_TYPE_SG_GAPS;
|
|
|
|
- info->pd = ib_alloc_pd(info->id->device, 0);
|
|
- if (IS_ERR(info->pd)) {
|
|
- rc = PTR_ERR(info->pd);
|
|
+ sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
|
|
+ if (IS_ERR(sc->ib.pd)) {
|
|
+ rc = PTR_ERR(sc->ib.pd);
|
|
log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
|
|
goto out2;
|
|
}
|
|
@@ -667,8 +676,8 @@ static int smbd_ia_open(
|
|
return 0;
|
|
|
|
out2:
|
|
- rdma_destroy_id(info->id);
|
|
- info->id = NULL;
|
|
+ rdma_destroy_id(sc->rdma.cm_id);
|
|
+ sc->rdma.cm_id = NULL;
|
|
|
|
out1:
|
|
return rc;
|
|
@@ -682,6 +691,7 @@ static int smbd_ia_open(
|
|
*/
|
|
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct ib_send_wr send_wr;
|
|
int rc = -ENOMEM;
|
|
struct smbd_request *request;
|
|
@@ -705,18 +715,18 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
|
|
|
|
request->num_sge = 1;
|
|
request->sge[0].addr = ib_dma_map_single(
|
|
- info->id->device, (void *)packet,
|
|
+ sc->ib.dev, (void *)packet,
|
|
sizeof(*packet), DMA_TO_DEVICE);
|
|
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
|
|
+ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
|
|
rc = -EIO;
|
|
goto dma_mapping_failed;
|
|
}
|
|
|
|
request->sge[0].length = sizeof(*packet);
|
|
- request->sge[0].lkey = info->pd->local_dma_lkey;
|
|
+ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
|
|
|
|
ib_dma_sync_single_for_device(
|
|
- info->id->device, request->sge[0].addr,
|
|
+ sc->ib.dev, request->sge[0].addr,
|
|
request->sge[0].length, DMA_TO_DEVICE);
|
|
|
|
request->cqe.done = send_done;
|
|
@@ -733,14 +743,14 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
|
|
request->sge[0].length, request->sge[0].lkey);
|
|
|
|
atomic_inc(&info->send_pending);
|
|
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
|
|
+ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
|
|
if (!rc)
|
|
return 0;
|
|
|
|
/* if we reach here, post send failed */
|
|
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
|
|
atomic_dec(&info->send_pending);
|
|
- ib_dma_unmap_single(info->id->device, request->sge[0].addr,
|
|
+ ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
|
|
request->sge[0].length, DMA_TO_DEVICE);
|
|
|
|
smbd_disconnect_rdma_connection(info);
|
|
@@ -792,6 +802,7 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
|
|
static int smbd_post_send(struct smbd_connection *info,
|
|
struct smbd_request *request)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct ib_send_wr send_wr;
|
|
int rc, i;
|
|
|
|
@@ -800,7 +811,7 @@ static int smbd_post_send(struct smbd_connection *info,
|
|
"rdma_request sge[%d] addr=0x%llx length=%u\n",
|
|
i, request->sge[i].addr, request->sge[i].length);
|
|
ib_dma_sync_single_for_device(
|
|
- info->id->device,
|
|
+ sc->ib.dev,
|
|
request->sge[i].addr,
|
|
request->sge[i].length,
|
|
DMA_TO_DEVICE);
|
|
@@ -815,7 +826,7 @@ static int smbd_post_send(struct smbd_connection *info,
|
|
send_wr.opcode = IB_WR_SEND;
|
|
send_wr.send_flags = IB_SEND_SIGNALED;
|
|
|
|
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
|
|
+ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
|
|
if (rc) {
|
|
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
|
|
smbd_disconnect_rdma_connection(info);
|
|
@@ -832,6 +843,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
|
|
struct iov_iter *iter,
|
|
int *_remaining_data_length)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
int i, rc;
|
|
int header_length;
|
|
int data_length;
|
|
@@ -843,11 +855,11 @@ static int smbd_post_send_iter(struct smbd_connection *info,
|
|
/* Wait for send credits. A SMBD packet needs one credit */
|
|
rc = wait_event_interruptible(info->wait_send_queue,
|
|
atomic_read(&info->send_credits) > 0 ||
|
|
- info->transport_status != SMBD_CONNECTED);
|
|
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
|
|
if (rc)
|
|
goto err_wait_credit;
|
|
|
|
- if (info->transport_status != SMBD_CONNECTED) {
|
|
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
|
|
log_outgoing(ERR, "disconnected not sending on wait_credit\n");
|
|
rc = -EAGAIN;
|
|
goto err_wait_credit;
|
|
@@ -860,9 +872,9 @@ static int smbd_post_send_iter(struct smbd_connection *info,
|
|
wait_send_queue:
|
|
wait_event(info->wait_post_send,
|
|
atomic_read(&info->send_pending) < info->send_credit_target ||
|
|
- info->transport_status != SMBD_CONNECTED);
|
|
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
|
|
|
|
- if (info->transport_status != SMBD_CONNECTED) {
|
|
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
|
|
log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
|
|
rc = -EAGAIN;
|
|
goto err_wait_send_queue;
|
|
@@ -889,8 +901,8 @@ static int smbd_post_send_iter(struct smbd_connection *info,
|
|
.nr_sge = 1,
|
|
.max_sge = SMBDIRECT_MAX_SEND_SGE,
|
|
.sge = request->sge,
|
|
- .device = info->id->device,
|
|
- .local_dma_lkey = info->pd->local_dma_lkey,
|
|
+ .device = sc->ib.dev,
|
|
+ .local_dma_lkey = sc->ib.pd->local_dma_lkey,
|
|
.direction = DMA_TO_DEVICE,
|
|
};
|
|
|
|
@@ -942,18 +954,18 @@ static int smbd_post_send_iter(struct smbd_connection *info,
|
|
if (!data_length)
|
|
header_length = offsetof(struct smbd_data_transfer, padding);
|
|
|
|
- request->sge[0].addr = ib_dma_map_single(info->id->device,
|
|
+ request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
|
|
(void *)packet,
|
|
header_length,
|
|
DMA_TO_DEVICE);
|
|
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
|
|
+ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
|
|
rc = -EIO;
|
|
request->sge[0].addr = 0;
|
|
goto err_dma;
|
|
}
|
|
|
|
request->sge[0].length = header_length;
|
|
- request->sge[0].lkey = info->pd->local_dma_lkey;
|
|
+ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
|
|
|
|
rc = smbd_post_send(info, request);
|
|
if (!rc)
|
|
@@ -962,7 +974,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
|
|
err_dma:
|
|
for (i = 0; i < request->num_sge; i++)
|
|
if (request->sge[i].addr)
|
|
- ib_dma_unmap_single(info->id->device,
|
|
+ ib_dma_unmap_single(sc->ib.dev,
|
|
request->sge[i].addr,
|
|
request->sge[i].length,
|
|
DMA_TO_DEVICE);
|
|
@@ -1007,17 +1019,18 @@ static int smbd_post_send_empty(struct smbd_connection *info)
|
|
static int smbd_post_recv(
|
|
struct smbd_connection *info, struct smbd_response *response)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct ib_recv_wr recv_wr;
|
|
int rc = -EIO;
|
|
|
|
response->sge.addr = ib_dma_map_single(
|
|
- info->id->device, response->packet,
|
|
+ sc->ib.dev, response->packet,
|
|
info->max_receive_size, DMA_FROM_DEVICE);
|
|
- if (ib_dma_mapping_error(info->id->device, response->sge.addr))
|
|
+ if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
|
|
return rc;
|
|
|
|
response->sge.length = info->max_receive_size;
|
|
- response->sge.lkey = info->pd->local_dma_lkey;
|
|
+ response->sge.lkey = sc->ib.pd->local_dma_lkey;
|
|
|
|
response->cqe.done = recv_done;
|
|
|
|
@@ -1026,10 +1039,11 @@ static int smbd_post_recv(
|
|
recv_wr.sg_list = &response->sge;
|
|
recv_wr.num_sge = 1;
|
|
|
|
- rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
|
|
+ rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
|
|
if (rc) {
|
|
- ib_dma_unmap_single(info->id->device, response->sge.addr,
|
|
+ ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
|
|
response->sge.length, DMA_FROM_DEVICE);
|
|
+ response->sge.length = 0;
|
|
smbd_disconnect_rdma_connection(info);
|
|
log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
|
|
}
|
|
@@ -1186,10 +1200,16 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
|
|
static void put_receive_buffer(
|
|
struct smbd_connection *info, struct smbd_response *response)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
unsigned long flags;
|
|
|
|
- ib_dma_unmap_single(info->id->device, response->sge.addr,
|
|
- response->sge.length, DMA_FROM_DEVICE);
|
|
+ if (likely(response->sge.length != 0)) {
|
|
+ ib_dma_unmap_single(sc->ib.dev,
|
|
+ response->sge.addr,
|
|
+ response->sge.length,
|
|
+ DMA_FROM_DEVICE);
|
|
+ response->sge.length = 0;
|
|
+ }
|
|
|
|
spin_lock_irqsave(&info->receive_queue_lock, flags);
|
|
list_add_tail(&response->list, &info->receive_queue);
|
|
@@ -1227,6 +1247,7 @@ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
|
|
goto allocate_failed;
|
|
|
|
response->info = info;
|
|
+ response->sge.length = 0;
|
|
list_add_tail(&response->list, &info->receive_queue);
|
|
info->count_receive_queue++;
|
|
}
|
|
@@ -1288,6 +1309,7 @@ static void idle_connection_timer(struct work_struct *work)
|
|
void smbd_destroy(struct TCP_Server_Info *server)
|
|
{
|
|
struct smbd_connection *info = server->smbd_conn;
|
|
+ struct smbdirect_socket *sc;
|
|
struct smbd_response *response;
|
|
unsigned long flags;
|
|
|
|
@@ -1295,19 +1317,21 @@ void smbd_destroy(struct TCP_Server_Info *server)
|
|
log_rdma_event(INFO, "rdma session already destroyed\n");
|
|
return;
|
|
}
|
|
+ sc = &info->socket;
|
|
|
|
log_rdma_event(INFO, "destroying rdma session\n");
|
|
- if (info->transport_status != SMBD_DISCONNECTED) {
|
|
- rdma_disconnect(server->smbd_conn->id);
|
|
+ if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
|
|
+ rdma_disconnect(sc->rdma.cm_id);
|
|
log_rdma_event(INFO, "wait for transport being disconnected\n");
|
|
wait_event_interruptible(
|
|
info->disconn_wait,
|
|
- info->transport_status == SMBD_DISCONNECTED);
|
|
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
|
|
}
|
|
|
|
log_rdma_event(INFO, "destroying qp\n");
|
|
- ib_drain_qp(info->id->qp);
|
|
- rdma_destroy_qp(info->id);
|
|
+ ib_drain_qp(sc->ib.qp);
|
|
+ rdma_destroy_qp(sc->rdma.cm_id);
|
|
+ sc->ib.qp = NULL;
|
|
|
|
log_rdma_event(INFO, "cancelling idle timer\n");
|
|
cancel_delayed_work_sync(&info->idle_timer_work);
|
|
@@ -1343,7 +1367,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
|
|
* are not locked by srv_mutex. It is possible some processes are
|
|
* blocked on transport srv_mutex while holding memory registration.
|
|
* Release the transport srv_mutex to allow them to hit the failure
|
|
- * path when sending data, and then release memory registartions.
|
|
+ * path when sending data, and then release memory registrations.
|
|
*/
|
|
log_rdma_event(INFO, "freeing mr list\n");
|
|
wake_up_interruptible_all(&info->wait_mr);
|
|
@@ -1354,10 +1378,10 @@ void smbd_destroy(struct TCP_Server_Info *server)
|
|
}
|
|
destroy_mr_list(info);
|
|
|
|
- ib_free_cq(info->send_cq);
|
|
- ib_free_cq(info->recv_cq);
|
|
- ib_dealloc_pd(info->pd);
|
|
- rdma_destroy_id(info->id);
|
|
+ ib_free_cq(sc->ib.send_cq);
|
|
+ ib_free_cq(sc->ib.recv_cq);
|
|
+ ib_dealloc_pd(sc->ib.pd);
|
|
+ rdma_destroy_id(sc->rdma.cm_id);
|
|
|
|
/* free mempools */
|
|
mempool_destroy(info->request_mempool);
|
|
@@ -1366,7 +1390,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
|
|
mempool_destroy(info->response_mempool);
|
|
kmem_cache_destroy(info->response_cache);
|
|
|
|
- info->transport_status = SMBD_DESTROYED;
|
|
+ sc->status = SMBDIRECT_SOCKET_DESTROYED;
|
|
|
|
destroy_workqueue(info->workqueue);
|
|
log_rdma_event(INFO, "rdma session destroyed\n");
|
|
@@ -1391,7 +1415,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
|
|
* This is possible if transport is disconnected and we haven't received
|
|
* notification from RDMA, but upper layer has detected timeout
|
|
*/
|
|
- if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
|
|
+ if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
|
|
log_rdma_event(INFO, "disconnecting transport\n");
|
|
smbd_destroy(server);
|
|
}
|
|
@@ -1490,6 +1514,7 @@ static struct smbd_connection *_smbd_get_connection(
|
|
{
|
|
int rc;
|
|
struct smbd_connection *info;
|
|
+ struct smbdirect_socket *sc;
|
|
struct rdma_conn_param conn_param;
|
|
struct ib_qp_init_attr qp_attr;
|
|
struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
|
|
@@ -1499,29 +1524,30 @@ static struct smbd_connection *_smbd_get_connection(
|
|
info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
|
|
if (!info)
|
|
return NULL;
|
|
+ sc = &info->socket;
|
|
|
|
- info->transport_status = SMBD_CONNECTING;
|
|
+ sc->status = SMBDIRECT_SOCKET_CONNECTING;
|
|
rc = smbd_ia_open(info, dstaddr, port);
|
|
if (rc) {
|
|
log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
|
|
goto create_id_failed;
|
|
}
|
|
|
|
- if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
|
|
- smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
|
|
+ if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
|
|
+ smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
|
|
log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
|
|
smbd_send_credit_target,
|
|
- info->id->device->attrs.max_cqe,
|
|
- info->id->device->attrs.max_qp_wr);
|
|
+ sc->ib.dev->attrs.max_cqe,
|
|
+ sc->ib.dev->attrs.max_qp_wr);
|
|
goto config_failed;
|
|
}
|
|
|
|
- if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
|
|
- smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
|
|
+ if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
|
|
+ smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
|
|
log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
|
|
smbd_receive_credit_max,
|
|
- info->id->device->attrs.max_cqe,
|
|
- info->id->device->attrs.max_qp_wr);
|
|
+ sc->ib.dev->attrs.max_cqe,
|
|
+ sc->ib.dev->attrs.max_qp_wr);
|
|
goto config_failed;
|
|
}
|
|
|
|
@@ -1532,32 +1558,30 @@ static struct smbd_connection *_smbd_get_connection(
|
|
info->max_receive_size = smbd_max_receive_size;
|
|
info->keep_alive_interval = smbd_keep_alive_interval;
|
|
|
|
- if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
|
|
- info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
|
|
+ if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
|
|
+ sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
|
|
log_rdma_event(ERR,
|
|
"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
|
|
IB_DEVICE_NAME_MAX,
|
|
- info->id->device->name,
|
|
- info->id->device->attrs.max_send_sge,
|
|
- info->id->device->attrs.max_recv_sge);
|
|
+ sc->ib.dev->name,
|
|
+ sc->ib.dev->attrs.max_send_sge,
|
|
+ sc->ib.dev->attrs.max_recv_sge);
|
|
goto config_failed;
|
|
}
|
|
|
|
- info->send_cq = NULL;
|
|
- info->recv_cq = NULL;
|
|
- info->send_cq =
|
|
- ib_alloc_cq_any(info->id->device, info,
|
|
+ sc->ib.send_cq =
|
|
+ ib_alloc_cq_any(sc->ib.dev, info,
|
|
info->send_credit_target, IB_POLL_SOFTIRQ);
|
|
- if (IS_ERR(info->send_cq)) {
|
|
- info->send_cq = NULL;
|
|
+ if (IS_ERR(sc->ib.send_cq)) {
|
|
+ sc->ib.send_cq = NULL;
|
|
goto alloc_cq_failed;
|
|
}
|
|
|
|
- info->recv_cq =
|
|
- ib_alloc_cq_any(info->id->device, info,
|
|
+ sc->ib.recv_cq =
|
|
+ ib_alloc_cq_any(sc->ib.dev, info,
|
|
info->receive_credit_max, IB_POLL_SOFTIRQ);
|
|
- if (IS_ERR(info->recv_cq)) {
|
|
- info->recv_cq = NULL;
|
|
+ if (IS_ERR(sc->ib.recv_cq)) {
|
|
+ sc->ib.recv_cq = NULL;
|
|
goto alloc_cq_failed;
|
|
}
|
|
|
|
@@ -1571,31 +1595,30 @@ static struct smbd_connection *_smbd_get_connection(
|
|
qp_attr.cap.max_inline_data = 0;
|
|
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
|
|
qp_attr.qp_type = IB_QPT_RC;
|
|
- qp_attr.send_cq = info->send_cq;
|
|
- qp_attr.recv_cq = info->recv_cq;
|
|
+ qp_attr.send_cq = sc->ib.send_cq;
|
|
+ qp_attr.recv_cq = sc->ib.recv_cq;
|
|
qp_attr.port_num = ~0;
|
|
|
|
- rc = rdma_create_qp(info->id, info->pd, &qp_attr);
|
|
+ rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
|
|
if (rc) {
|
|
log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
|
|
goto create_qp_failed;
|
|
}
|
|
+ sc->ib.qp = sc->rdma.cm_id->qp;
|
|
|
|
memset(&conn_param, 0, sizeof(conn_param));
|
|
conn_param.initiator_depth = 0;
|
|
|
|
conn_param.responder_resources =
|
|
- info->id->device->attrs.max_qp_rd_atom
|
|
- < SMBD_CM_RESPONDER_RESOURCES ?
|
|
- info->id->device->attrs.max_qp_rd_atom :
|
|
- SMBD_CM_RESPONDER_RESOURCES;
|
|
+ min(sc->ib.dev->attrs.max_qp_rd_atom,
|
|
+ SMBD_CM_RESPONDER_RESOURCES);
|
|
info->responder_resources = conn_param.responder_resources;
|
|
log_rdma_mr(INFO, "responder_resources=%d\n",
|
|
info->responder_resources);
|
|
|
|
/* Need to send IRD/ORD in private data for iWARP */
|
|
- info->id->device->ops.get_port_immutable(
|
|
- info->id->device, info->id->port_num, &port_immutable);
|
|
+ sc->ib.dev->ops.get_port_immutable(
|
|
+ sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
|
|
if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
|
|
ird_ord_hdr[0] = info->responder_resources;
|
|
ird_ord_hdr[1] = 1;
|
|
@@ -1616,16 +1639,18 @@ static struct smbd_connection *_smbd_get_connection(
|
|
init_waitqueue_head(&info->conn_wait);
|
|
init_waitqueue_head(&info->disconn_wait);
|
|
init_waitqueue_head(&info->wait_reassembly_queue);
|
|
- rc = rdma_connect(info->id, &conn_param);
|
|
+ rc = rdma_connect(sc->rdma.cm_id, &conn_param);
|
|
if (rc) {
|
|
log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
|
|
goto rdma_connect_failed;
|
|
}
|
|
|
|
- wait_event_interruptible(
|
|
- info->conn_wait, info->transport_status != SMBD_CONNECTING);
|
|
+ wait_event_interruptible_timeout(
|
|
+ info->conn_wait,
|
|
+ sc->status != SMBDIRECT_SOCKET_CONNECTING,
|
|
+ msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
|
|
|
|
- if (info->transport_status != SMBD_CONNECTED) {
|
|
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
|
|
log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
|
|
goto rdma_connect_failed;
|
|
}
|
|
@@ -1676,26 +1701,26 @@ static struct smbd_connection *_smbd_get_connection(
|
|
negotiation_failed:
|
|
cancel_delayed_work_sync(&info->idle_timer_work);
|
|
destroy_caches_and_workqueue(info);
|
|
- info->transport_status = SMBD_NEGOTIATE_FAILED;
|
|
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
|
|
init_waitqueue_head(&info->conn_wait);
|
|
- rdma_disconnect(info->id);
|
|
+ rdma_disconnect(sc->rdma.cm_id);
|
|
wait_event(info->conn_wait,
|
|
- info->transport_status == SMBD_DISCONNECTED);
|
|
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
|
|
|
|
allocate_cache_failed:
|
|
rdma_connect_failed:
|
|
- rdma_destroy_qp(info->id);
|
|
+ rdma_destroy_qp(sc->rdma.cm_id);
|
|
|
|
create_qp_failed:
|
|
alloc_cq_failed:
|
|
- if (info->send_cq)
|
|
- ib_free_cq(info->send_cq);
|
|
- if (info->recv_cq)
|
|
- ib_free_cq(info->recv_cq);
|
|
+ if (sc->ib.send_cq)
|
|
+ ib_free_cq(sc->ib.send_cq);
|
|
+ if (sc->ib.recv_cq)
|
|
+ ib_free_cq(sc->ib.recv_cq);
|
|
|
|
config_failed:
|
|
- ib_dealloc_pd(info->pd);
|
|
- rdma_destroy_id(info->id);
|
|
+ ib_dealloc_pd(sc->ib.pd);
|
|
+ rdma_destroy_id(sc->rdma.cm_id);
|
|
|
|
create_id_failed:
|
|
kfree(info);
|
|
@@ -1735,6 +1760,7 @@ struct smbd_connection *smbd_get_connection(
|
|
static int smbd_recv_buf(struct smbd_connection *info, char *buf,
|
|
unsigned int size)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct smbd_response *response;
|
|
struct smbd_data_transfer *data_transfer;
|
|
int to_copy, to_read, data_read, offset;
|
|
@@ -1849,12 +1875,12 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
|
|
rc = wait_event_interruptible(
|
|
info->wait_reassembly_queue,
|
|
info->reassembly_data_length >= size ||
|
|
- info->transport_status != SMBD_CONNECTED);
|
|
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
|
|
/* Don't return any data if interrupted */
|
|
if (rc)
|
|
return rc;
|
|
|
|
- if (info->transport_status != SMBD_CONNECTED) {
|
|
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
|
|
log_read(ERR, "disconnected\n");
|
|
return -ECONNABORTED;
|
|
}
|
|
@@ -1872,6 +1898,7 @@ static int smbd_recv_page(struct smbd_connection *info,
|
|
struct page *page, unsigned int page_offset,
|
|
unsigned int to_read)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
int ret;
|
|
char *to_address;
|
|
void *page_address;
|
|
@@ -1880,7 +1907,7 @@ static int smbd_recv_page(struct smbd_connection *info,
|
|
ret = wait_event_interruptible(
|
|
info->wait_reassembly_queue,
|
|
info->reassembly_data_length >= to_read ||
|
|
- info->transport_status != SMBD_CONNECTED);
|
|
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
|
|
if (ret)
|
|
return ret;
|
|
|
|
@@ -1955,12 +1982,13 @@ int smbd_send(struct TCP_Server_Info *server,
|
|
int num_rqst, struct smb_rqst *rqst_array)
|
|
{
|
|
struct smbd_connection *info = server->smbd_conn;
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct smb_rqst *rqst;
|
|
struct iov_iter iter;
|
|
unsigned int remaining_data_length, klen;
|
|
int rc, i, rqst_idx;
|
|
|
|
- if (info->transport_status != SMBD_CONNECTED)
|
|
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
|
|
return -EAGAIN;
|
|
|
|
/*
|
|
@@ -2054,6 +2082,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
|
|
{
|
|
struct smbd_connection *info =
|
|
container_of(work, struct smbd_connection, mr_recovery_work);
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct smbd_mr *smbdirect_mr;
|
|
int rc;
|
|
|
|
@@ -2071,7 +2100,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
|
|
}
|
|
|
|
smbdirect_mr->mr = ib_alloc_mr(
|
|
- info->pd, info->mr_type,
|
|
+ sc->ib.pd, info->mr_type,
|
|
info->max_frmr_depth);
|
|
if (IS_ERR(smbdirect_mr->mr)) {
|
|
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
|
|
@@ -2100,12 +2129,13 @@ static void smbd_mr_recovery_work(struct work_struct *work)
|
|
|
|
static void destroy_mr_list(struct smbd_connection *info)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct smbd_mr *mr, *tmp;
|
|
|
|
cancel_work_sync(&info->mr_recovery_work);
|
|
list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
|
|
if (mr->state == MR_INVALIDATED)
|
|
- ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
|
|
+ ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
|
|
mr->sgt.nents, mr->dir);
|
|
ib_dereg_mr(mr->mr);
|
|
kfree(mr->sgt.sgl);
|
|
@@ -2122,6 +2152,7 @@ static void destroy_mr_list(struct smbd_connection *info)
|
|
*/
|
|
static int allocate_mr_list(struct smbd_connection *info)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
int i;
|
|
struct smbd_mr *smbdirect_mr, *tmp;
|
|
|
|
@@ -2137,7 +2168,7 @@ static int allocate_mr_list(struct smbd_connection *info)
|
|
smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
|
|
if (!smbdirect_mr)
|
|
goto cleanup_entries;
|
|
- smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
|
|
+ smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
|
|
info->max_frmr_depth);
|
|
if (IS_ERR(smbdirect_mr->mr)) {
|
|
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
|
|
@@ -2182,20 +2213,20 @@ static int allocate_mr_list(struct smbd_connection *info)
|
|
*/
|
|
static struct smbd_mr *get_mr(struct smbd_connection *info)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct smbd_mr *ret;
|
|
int rc;
|
|
again:
|
|
rc = wait_event_interruptible(info->wait_mr,
|
|
atomic_read(&info->mr_ready_count) ||
|
|
- info->transport_status != SMBD_CONNECTED);
|
|
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
|
|
if (rc) {
|
|
log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
|
|
return NULL;
|
|
}
|
|
|
|
- if (info->transport_status != SMBD_CONNECTED) {
|
|
- log_rdma_mr(ERR, "info->transport_status=%x\n",
|
|
- info->transport_status);
|
|
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
|
|
+ log_rdma_mr(ERR, "sc->status=%x\n", sc->status);
|
|
return NULL;
|
|
}
|
|
|
|
@@ -2248,6 +2279,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
|
|
struct iov_iter *iter,
|
|
bool writing, bool need_invalidate)
|
|
{
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
struct smbd_mr *smbdirect_mr;
|
|
int rc, num_pages;
|
|
enum dma_data_direction dir;
|
|
@@ -2277,7 +2309,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
|
|
num_pages, iov_iter_count(iter), info->max_frmr_depth);
|
|
smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
|
|
|
|
- rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl,
|
|
+ rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
|
|
smbdirect_mr->sgt.nents, dir);
|
|
if (!rc) {
|
|
log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
|
|
@@ -2313,7 +2345,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
|
|
* on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
|
|
* on the next ib_post_send when we actaully send I/O to remote peer
|
|
*/
|
|
- rc = ib_post_send(info->id->qp, ®_wr->wr, NULL);
|
|
+ rc = ib_post_send(sc->ib.qp, ®_wr->wr, NULL);
|
|
if (!rc)
|
|
return smbdirect_mr;
|
|
|
|
@@ -2322,7 +2354,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
|
|
|
|
/* If all failed, attempt to recover this MR by setting it MR_ERROR*/
|
|
map_mr_error:
|
|
- ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
|
|
+ ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
|
|
smbdirect_mr->sgt.nents, smbdirect_mr->dir);
|
|
|
|
dma_map_error:
|
|
@@ -2360,6 +2392,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
|
|
{
|
|
struct ib_send_wr *wr;
|
|
struct smbd_connection *info = smbdirect_mr->conn;
|
|
+ struct smbdirect_socket *sc = &info->socket;
|
|
int rc = 0;
|
|
|
|
if (smbdirect_mr->need_invalidate) {
|
|
@@ -2373,7 +2406,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
|
|
wr->send_flags = IB_SEND_SIGNALED;
|
|
|
|
init_completion(&smbdirect_mr->invalidate_done);
|
|
- rc = ib_post_send(info->id->qp, wr, NULL);
|
|
+ rc = ib_post_send(sc->ib.qp, wr, NULL);
|
|
if (rc) {
|
|
log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
|
|
smbd_disconnect_rdma_connection(info);
|
|
@@ -2390,7 +2423,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
|
|
|
|
if (smbdirect_mr->state == MR_INVALIDATED) {
|
|
ib_dma_unmap_sg(
|
|
- info->id->device, smbdirect_mr->sgt.sgl,
|
|
+ sc->ib.dev, smbdirect_mr->sgt.sgl,
|
|
smbdirect_mr->sgt.nents,
|
|
smbdirect_mr->dir);
|
|
smbdirect_mr->state = MR_READY;
|
|
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
|
|
index 83f239f376f06d..c881e58c639de3 100644
|
|
--- a/fs/smb/client/smbdirect.h
|
|
+++ b/fs/smb/client/smbdirect.h
|
|
@@ -15,6 +15,8 @@
|
|
#include <rdma/rdma_cm.h>
|
|
#include <linux/mempool.h>
|
|
|
|
+#include "../common/smbdirect/smbdirect_socket.h"
|
|
+
|
|
extern int rdma_readwrite_threshold;
|
|
extern int smbd_max_frmr_depth;
|
|
extern int smbd_keep_alive_interval;
|
|
@@ -50,14 +52,8 @@ enum smbd_connection_status {
|
|
* 5. mempools for allocating packets
|
|
*/
|
|
struct smbd_connection {
|
|
- enum smbd_connection_status transport_status;
|
|
-
|
|
- /* RDMA related */
|
|
- struct rdma_cm_id *id;
|
|
- struct ib_qp_init_attr qp_attr;
|
|
- struct ib_pd *pd;
|
|
- struct ib_cq *send_cq, *recv_cq;
|
|
- struct ib_device_attr dev_attr;
|
|
+ struct smbdirect_socket socket;
|
|
+
|
|
int ri_rc;
|
|
struct completion ri_done;
|
|
wait_queue_head_t conn_wait;
|
|
@@ -111,7 +107,7 @@ struct smbd_connection {
|
|
/* Used by transport to wait until all MRs are returned */
|
|
wait_queue_head_t wait_for_mr_cleanup;
|
|
|
|
- /* Activity accoutning */
|
|
+ /* Activity accounting */
|
|
atomic_t send_pending;
|
|
wait_queue_head_t wait_send_pending;
|
|
wait_queue_head_t wait_post_send;
|
|
diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
|
|
new file mode 100644
|
|
index 00000000000000..69a55561f91ae9
|
|
--- /dev/null
|
|
+++ b/fs/smb/common/smbdirect/smbdirect_socket.h
|
|
@@ -0,0 +1,41 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
+/*
|
|
+ * Copyright (c) 2025 Stefan Metzmacher
|
|
+ */
|
|
+
|
|
+#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
|
|
+#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
|
|
+
|
|
+enum smbdirect_socket_status {
|
|
+ SMBDIRECT_SOCKET_CREATED,
|
|
+ SMBDIRECT_SOCKET_CONNECTING,
|
|
+ SMBDIRECT_SOCKET_CONNECTED,
|
|
+ SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
|
|
+ SMBDIRECT_SOCKET_DISCONNECTING,
|
|
+ SMBDIRECT_SOCKET_DISCONNECTED,
|
|
+ SMBDIRECT_SOCKET_DESTROYED
|
|
+};
|
|
+
|
|
+struct smbdirect_socket {
|
|
+ enum smbdirect_socket_status status;
|
|
+
|
|
+ /* RDMA related */
|
|
+ struct {
|
|
+ struct rdma_cm_id *cm_id;
|
|
+ } rdma;
|
|
+
|
|
+ /* IB verbs related */
|
|
+ struct {
|
|
+ struct ib_pd *pd;
|
|
+ struct ib_cq *send_cq;
|
|
+ struct ib_cq *recv_cq;
|
|
+
|
|
+ /*
|
|
+ * shortcuts for rdma.cm_id->{qp,device};
|
|
+ */
|
|
+ struct ib_qp *qp;
|
|
+ struct ib_device *dev;
|
|
+ } ib;
|
|
+};
|
|
+
|
|
+#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
|
|
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
|
|
index 47e6a8694c0fc1..c769fe3859b37e 100644
|
|
--- a/fs/smb/server/connection.h
|
|
+++ b/fs/smb/server/connection.h
|
|
@@ -45,6 +45,7 @@ struct ksmbd_conn {
|
|
struct mutex srv_mutex;
|
|
int status;
|
|
unsigned int cli_cap;
|
|
+ __be32 inet_addr;
|
|
char *request_buf;
|
|
struct ksmbd_transport *transport;
|
|
struct nls_table *local_nls;
|
|
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
|
|
index e25c2ca56461ac..d3dd3b9b4005c6 100644
|
|
--- a/fs/smb/server/smb2pdu.c
|
|
+++ b/fs/smb/server/smb2pdu.c
|
|
@@ -1610,11 +1610,24 @@ static int krb5_authenticate(struct ksmbd_work *work,
|
|
|
|
rsp->SecurityBufferLength = cpu_to_le16(out_len);
|
|
|
|
- if ((conn->sign || server_conf.enforced_signing) ||
|
|
+ /*
|
|
+ * If session state is SMB2_SESSION_VALID, We can assume
|
|
+ * that it is reauthentication. And the user/password
|
|
+ * has been verified, so return it here.
|
|
+ */
|
|
+ if (sess->state == SMB2_SESSION_VALID) {
|
|
+ if (conn->binding)
|
|
+ goto binding_session;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE &&
|
|
+ (conn->sign || server_conf.enforced_signing)) ||
|
|
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
|
|
sess->sign = true;
|
|
|
|
- if (smb3_encryption_negotiated(conn)) {
|
|
+ if (smb3_encryption_negotiated(conn) &&
|
|
+ !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
|
|
retval = conn->ops->generate_encryptionkey(conn, sess);
|
|
if (retval) {
|
|
ksmbd_debug(SMB,
|
|
@@ -1627,6 +1640,7 @@ static int krb5_authenticate(struct ksmbd_work *work,
|
|
sess->sign = false;
|
|
}
|
|
|
|
+binding_session:
|
|
if (conn->dialect >= SMB30_PROT_ID) {
|
|
chann = lookup_chann_list(sess, conn);
|
|
if (!chann) {
|
|
@@ -1817,8 +1831,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
|
|
ksmbd_conn_set_good(conn);
|
|
sess->state = SMB2_SESSION_VALID;
|
|
}
|
|
- kfree(sess->Preauth_HashValue);
|
|
- sess->Preauth_HashValue = NULL;
|
|
} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
|
|
if (negblob->MessageType == NtLmNegotiate) {
|
|
rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
|
|
@@ -1845,8 +1857,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
|
|
kfree(preauth_sess);
|
|
}
|
|
}
|
|
- kfree(sess->Preauth_HashValue);
|
|
- sess->Preauth_HashValue = NULL;
|
|
} else {
|
|
pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
|
|
le32_to_cpu(negblob->MessageType));
|
|
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
|
|
index 23537e1b346858..2839c704110cd1 100644
|
|
--- a/fs/smb/server/smb_common.c
|
|
+++ b/fs/smb/server/smb_common.c
|
|
@@ -515,7 +515,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
|
|
|
|
p = strrchr(longname, '.');
|
|
if (p == longname) { /*name starts with a dot*/
|
|
- strscpy(extension, "___", strlen("___"));
|
|
+ strscpy(extension, "___", sizeof(extension));
|
|
} else {
|
|
if (p) {
|
|
p++;
|
|
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
|
|
index eaef459776151b..6c3a57bff14703 100644
|
|
--- a/fs/smb/server/transport_rdma.c
|
|
+++ b/fs/smb/server/transport_rdma.c
|
|
@@ -128,9 +128,6 @@ struct smb_direct_transport {
|
|
spinlock_t recvmsg_queue_lock;
|
|
struct list_head recvmsg_queue;
|
|
|
|
- spinlock_t empty_recvmsg_queue_lock;
|
|
- struct list_head empty_recvmsg_queue;
|
|
-
|
|
int send_credit_target;
|
|
atomic_t send_credits;
|
|
spinlock_t lock_new_recv_credits;
|
|
@@ -267,40 +264,19 @@ smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
|
|
static void put_recvmsg(struct smb_direct_transport *t,
|
|
struct smb_direct_recvmsg *recvmsg)
|
|
{
|
|
- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
|
|
- recvmsg->sge.length, DMA_FROM_DEVICE);
|
|
+ if (likely(recvmsg->sge.length != 0)) {
|
|
+ ib_dma_unmap_single(t->cm_id->device,
|
|
+ recvmsg->sge.addr,
|
|
+ recvmsg->sge.length,
|
|
+ DMA_FROM_DEVICE);
|
|
+ recvmsg->sge.length = 0;
|
|
+ }
|
|
|
|
spin_lock(&t->recvmsg_queue_lock);
|
|
list_add(&recvmsg->list, &t->recvmsg_queue);
|
|
spin_unlock(&t->recvmsg_queue_lock);
|
|
}
|
|
|
|
-static struct
|
|
-smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
|
|
-{
|
|
- struct smb_direct_recvmsg *recvmsg = NULL;
|
|
-
|
|
- spin_lock(&t->empty_recvmsg_queue_lock);
|
|
- if (!list_empty(&t->empty_recvmsg_queue)) {
|
|
- recvmsg = list_first_entry(&t->empty_recvmsg_queue,
|
|
- struct smb_direct_recvmsg, list);
|
|
- list_del(&recvmsg->list);
|
|
- }
|
|
- spin_unlock(&t->empty_recvmsg_queue_lock);
|
|
- return recvmsg;
|
|
-}
|
|
-
|
|
-static void put_empty_recvmsg(struct smb_direct_transport *t,
|
|
- struct smb_direct_recvmsg *recvmsg)
|
|
-{
|
|
- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
|
|
- recvmsg->sge.length, DMA_FROM_DEVICE);
|
|
-
|
|
- spin_lock(&t->empty_recvmsg_queue_lock);
|
|
- list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
|
|
- spin_unlock(&t->empty_recvmsg_queue_lock);
|
|
-}
|
|
-
|
|
static void enqueue_reassembly(struct smb_direct_transport *t,
|
|
struct smb_direct_recvmsg *recvmsg,
|
|
int data_length)
|
|
@@ -385,9 +361,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
|
|
spin_lock_init(&t->recvmsg_queue_lock);
|
|
INIT_LIST_HEAD(&t->recvmsg_queue);
|
|
|
|
- spin_lock_init(&t->empty_recvmsg_queue_lock);
|
|
- INIT_LIST_HEAD(&t->empty_recvmsg_queue);
|
|
-
|
|
init_waitqueue_head(&t->wait_send_pending);
|
|
atomic_set(&t->send_pending, 0);
|
|
|
|
@@ -547,13 +520,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
t = recvmsg->transport;
|
|
|
|
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
|
|
+ put_recvmsg(t, recvmsg);
|
|
if (wc->status != IB_WC_WR_FLUSH_ERR) {
|
|
pr_err("Recv error. status='%s (%d)' opcode=%d\n",
|
|
ib_wc_status_msg(wc->status), wc->status,
|
|
wc->opcode);
|
|
smb_direct_disconnect_rdma_connection(t);
|
|
}
|
|
- put_empty_recvmsg(t, recvmsg);
|
|
return;
|
|
}
|
|
|
|
@@ -567,7 +540,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
switch (recvmsg->type) {
|
|
case SMB_DIRECT_MSG_NEGOTIATE_REQ:
|
|
if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
|
|
- put_empty_recvmsg(t, recvmsg);
|
|
+ put_recvmsg(t, recvmsg);
|
|
+ smb_direct_disconnect_rdma_connection(t);
|
|
return;
|
|
}
|
|
t->negotiation_requested = true;
|
|
@@ -575,7 +549,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
t->status = SMB_DIRECT_CS_CONNECTED;
|
|
enqueue_reassembly(t, recvmsg, 0);
|
|
wake_up_interruptible(&t->wait_status);
|
|
- break;
|
|
+ return;
|
|
case SMB_DIRECT_MSG_DATA_TRANSFER: {
|
|
struct smb_direct_data_transfer *data_transfer =
|
|
(struct smb_direct_data_transfer *)recvmsg->packet;
|
|
@@ -584,7 +558,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
|
|
if (wc->byte_len <
|
|
offsetof(struct smb_direct_data_transfer, padding)) {
|
|
- put_empty_recvmsg(t, recvmsg);
|
|
+ put_recvmsg(t, recvmsg);
|
|
+ smb_direct_disconnect_rdma_connection(t);
|
|
return;
|
|
}
|
|
|
|
@@ -592,7 +567,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
if (data_length) {
|
|
if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
|
|
(u64)data_length) {
|
|
- put_empty_recvmsg(t, recvmsg);
|
|
+ put_recvmsg(t, recvmsg);
|
|
+ smb_direct_disconnect_rdma_connection(t);
|
|
return;
|
|
}
|
|
|
|
@@ -604,16 +580,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
else
|
|
t->full_packet_received = true;
|
|
|
|
- enqueue_reassembly(t, recvmsg, (int)data_length);
|
|
- wake_up_interruptible(&t->wait_reassembly_queue);
|
|
-
|
|
spin_lock(&t->receive_credit_lock);
|
|
receive_credits = --(t->recv_credits);
|
|
avail_recvmsg_count = t->count_avail_recvmsg;
|
|
spin_unlock(&t->receive_credit_lock);
|
|
} else {
|
|
- put_empty_recvmsg(t, recvmsg);
|
|
-
|
|
spin_lock(&t->receive_credit_lock);
|
|
receive_credits = --(t->recv_credits);
|
|
avail_recvmsg_count = ++(t->count_avail_recvmsg);
|
|
@@ -635,11 +606,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
|
|
if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
|
|
mod_delayed_work(smb_direct_wq,
|
|
&t->post_recv_credits_work, 0);
|
|
- break;
|
|
+
|
|
+ if (data_length) {
|
|
+ enqueue_reassembly(t, recvmsg, (int)data_length);
|
|
+ wake_up_interruptible(&t->wait_reassembly_queue);
|
|
+ } else
|
|
+ put_recvmsg(t, recvmsg);
|
|
+
|
|
+ return;
|
|
}
|
|
- default:
|
|
- break;
|
|
}
|
|
+
|
|
+ /*
|
|
+ * This is an internal error!
|
|
+ */
|
|
+ WARN_ON_ONCE(recvmsg->type != SMB_DIRECT_MSG_DATA_TRANSFER);
|
|
+ put_recvmsg(t, recvmsg);
|
|
+ smb_direct_disconnect_rdma_connection(t);
|
|
}
|
|
|
|
static int smb_direct_post_recv(struct smb_direct_transport *t,
|
|
@@ -669,6 +652,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
|
|
ib_dma_unmap_single(t->cm_id->device,
|
|
recvmsg->sge.addr, recvmsg->sge.length,
|
|
DMA_FROM_DEVICE);
|
|
+ recvmsg->sge.length = 0;
|
|
smb_direct_disconnect_rdma_connection(t);
|
|
return ret;
|
|
}
|
|
@@ -810,7 +794,6 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
|
|
struct smb_direct_recvmsg *recvmsg;
|
|
int receive_credits, credits = 0;
|
|
int ret;
|
|
- int use_free = 1;
|
|
|
|
spin_lock(&t->receive_credit_lock);
|
|
receive_credits = t->recv_credits;
|
|
@@ -818,18 +801,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
|
|
|
|
if (receive_credits < t->recv_credit_target) {
|
|
while (true) {
|
|
- if (use_free)
|
|
- recvmsg = get_free_recvmsg(t);
|
|
- else
|
|
- recvmsg = get_empty_recvmsg(t);
|
|
- if (!recvmsg) {
|
|
- if (use_free) {
|
|
- use_free = 0;
|
|
- continue;
|
|
- } else {
|
|
- break;
|
|
- }
|
|
- }
|
|
+ recvmsg = get_free_recvmsg(t);
|
|
+ if (!recvmsg)
|
|
+ break;
|
|
|
|
recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
|
|
recvmsg->first_segment = false;
|
|
@@ -1805,8 +1779,6 @@ static void smb_direct_destroy_pools(struct smb_direct_transport *t)
|
|
|
|
while ((recvmsg = get_free_recvmsg(t)))
|
|
mempool_free(recvmsg, t->recvmsg_mempool);
|
|
- while ((recvmsg = get_empty_recvmsg(t)))
|
|
- mempool_free(recvmsg, t->recvmsg_mempool);
|
|
|
|
mempool_destroy(t->recvmsg_mempool);
|
|
t->recvmsg_mempool = NULL;
|
|
@@ -1862,6 +1834,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
|
|
if (!recvmsg)
|
|
goto err;
|
|
recvmsg->transport = t;
|
|
+ recvmsg->sge.length = 0;
|
|
list_add(&recvmsg->list, &t->recvmsg_queue);
|
|
}
|
|
t->count_avail_recvmsg = t->recv_credit_max;
|
|
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
|
|
index 64941a49438f37..e86bc4a460687a 100644
|
|
--- a/fs/smb/server/transport_tcp.c
|
|
+++ b/fs/smb/server/transport_tcp.c
|
|
@@ -87,6 +87,7 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
|
|
return NULL;
|
|
}
|
|
|
|
+ conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
|
|
conn->transport = KSMBD_TRANS(t);
|
|
KSMBD_TRANS(t)->conn = conn;
|
|
KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
|
|
@@ -230,6 +231,8 @@ static int ksmbd_kthread_fn(void *p)
|
|
{
|
|
struct socket *client_sk = NULL;
|
|
struct interface *iface = (struct interface *)p;
|
|
+ struct inet_sock *csk_inet;
|
|
+ struct ksmbd_conn *conn;
|
|
int ret;
|
|
|
|
while (!kthread_should_stop()) {
|
|
@@ -248,6 +251,20 @@ static int ksmbd_kthread_fn(void *p)
|
|
continue;
|
|
}
|
|
|
|
+ /*
|
|
+ * Limits repeated connections from clients with the same IP.
|
|
+ */
|
|
+ csk_inet = inet_sk(client_sk->sk);
|
|
+ down_read(&conn_list_lock);
|
|
+ list_for_each_entry(conn, &conn_list, conns_list)
|
|
+ if (csk_inet->inet_daddr == conn->inet_addr) {
|
|
+ ret = -EAGAIN;
|
|
+ break;
|
|
+ }
|
|
+ up_read(&conn_list_lock);
|
|
+ if (ret == -EAGAIN)
|
|
+ continue;
|
|
+
|
|
if (server_conf.max_connections &&
|
|
atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
|
|
pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
|
|
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
|
|
index de813700f3d4f6..067b346921b63f 100644
|
|
--- a/fs/smb/server/vfs.c
|
|
+++ b/fs/smb/server/vfs.c
|
|
@@ -563,7 +563,8 @@ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat)
|
|
{
|
|
int err;
|
|
|
|
- err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT);
|
|
+ err = vfs_getattr(path, stat, STATX_BASIC_STATS | STATX_BTIME,
|
|
+ AT_STATX_SYNC_AS_STAT);
|
|
if (err)
|
|
pr_err("getattr failed, err %d\n", err);
|
|
return err;
|
|
diff --git a/include/linux/audit.h b/include/linux/audit.h
|
|
index 51b1b7054a233a..335e1ba5a23271 100644
|
|
--- a/include/linux/audit.h
|
|
+++ b/include/linux/audit.h
|
|
@@ -416,7 +416,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
|
|
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
|
|
extern void __audit_mmap_fd(int fd, int flags);
|
|
extern void __audit_openat2_how(struct open_how *how);
|
|
-extern void __audit_log_kern_module(char *name);
|
|
+extern void __audit_log_kern_module(const char *name);
|
|
extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
|
|
extern void __audit_tk_injoffset(struct timespec64 offset);
|
|
extern void __audit_ntp_log(const struct audit_ntp_data *ad);
|
|
@@ -518,7 +518,7 @@ static inline void audit_openat2_how(struct open_how *how)
|
|
__audit_openat2_how(how);
|
|
}
|
|
|
|
-static inline void audit_log_kern_module(char *name)
|
|
+static inline void audit_log_kern_module(const char *name)
|
|
{
|
|
if (!audit_dummy_context())
|
|
__audit_log_kern_module(name);
|
|
@@ -676,9 +676,8 @@ static inline void audit_mmap_fd(int fd, int flags)
|
|
static inline void audit_openat2_how(struct open_how *how)
|
|
{ }
|
|
|
|
-static inline void audit_log_kern_module(char *name)
|
|
-{
|
|
-}
|
|
+static inline void audit_log_kern_module(const char *name)
|
|
+{ }
|
|
|
|
static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
|
|
{ }
|
|
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
|
|
index c13e99cbbf8162..c9eae711700133 100644
|
|
--- a/include/linux/fs_context.h
|
|
+++ b/include/linux/fs_context.h
|
|
@@ -196,7 +196,7 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
|
|
*/
|
|
#define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
|
|
#define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
|
|
-#define infofc(p, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
|
|
+#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
|
|
|
|
/**
|
|
* warnf - Store supplementary warning message
|
|
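The infofc() change above only renames the macro parameter from p to fc so the body's use of fc refers to the argument rather than relying on a variable of that name at the call site. A minimal caller sketch; demo_parse_monolithic() is a hypothetical fs_context hook.

#include <linux/fs_context.h>

/* Hypothetical ->parse_monolithic() hook: log against the context's fc_log. */
static int demo_parse_monolithic(struct fs_context *fc, void *data)
{
	if (!data)
		infofc(fc, "no mount options given, using defaults");
	return 0;
}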
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
|
|
index 962cd41a2cb5af..061e19c94a6bc6 100644
|
|
--- a/include/linux/moduleparam.h
|
|
+++ b/include/linux/moduleparam.h
|
|
@@ -282,10 +282,9 @@ struct kparam_array
|
|
#define __moduleparam_const const
|
|
#endif
|
|
|
|
-/* This is the fundamental function for registering boot/module
|
|
- parameters. */
|
|
+/* This is the fundamental function for registering boot/module parameters. */
|
|
#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
|
|
- /* Default value instead of permissions? */ \
|
|
+ static_assert(sizeof(""prefix) - 1 <= MAX_PARAM_PREFIX_LEN); \
|
|
static const char __param_str_##name[] = prefix #name; \
|
|
static struct kernel_param __moduleparam_const __param_##name \
|
|
__used __section("__param") \
|
|
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
|
|
index c7abce28ed2995..aab0aebb529e02 100644
|
|
--- a/include/linux/pps_kernel.h
|
|
+++ b/include/linux/pps_kernel.h
|
|
@@ -52,6 +52,7 @@ struct pps_device {
|
|
int current_mode; /* PPS mode at event time */
|
|
|
|
unsigned int last_ev; /* last PPS event id */
|
|
+ unsigned int last_fetched_ev; /* last fetched PPS event id */
|
|
wait_queue_head_t queue; /* PPS event queue */
|
|
|
|
unsigned int id; /* PPS source unique ID */
|
|
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
|
|
index 1aca3f332d9c06..85672adc734921 100644
|
|
--- a/include/linux/proc_fs.h
|
|
+++ b/include/linux/proc_fs.h
|
|
@@ -27,6 +27,7 @@ enum {
|
|
|
|
PROC_ENTRY_proc_read_iter = 1U << 1,
|
|
PROC_ENTRY_proc_compat_ioctl = 1U << 2,
|
|
+ PROC_ENTRY_proc_lseek = 1U << 3,
|
|
};
|
|
|
|
struct proc_ops {
|
|
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
|
|
index f1fd3a8044e0ec..dd10c22299ab82 100644
|
|
--- a/include/linux/psi_types.h
|
|
+++ b/include/linux/psi_types.h
|
|
@@ -84,11 +84,9 @@ enum psi_aggregators {
|
|
struct psi_group_cpu {
|
|
/* 1st cacheline updated by the scheduler */
|
|
|
|
- /* Aggregator needs to know of concurrent changes */
|
|
- seqcount_t seq ____cacheline_aligned_in_smp;
|
|
-
|
|
/* States of the tasks belonging to this group */
|
|
- unsigned int tasks[NR_PSI_TASK_COUNTS];
|
|
+ unsigned int tasks[NR_PSI_TASK_COUNTS]
|
|
+ ____cacheline_aligned_in_smp;
|
|
|
|
/* Aggregate pressure state derived from the tasks */
|
|
u32 state_mask;
|
|
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
|
index 393c300347dee4..cb38eee732fd02 100644
|
|
--- a/include/linux/sched.h
|
|
+++ b/include/linux/sched.h
|
|
@@ -753,10 +753,8 @@ struct task_struct {
|
|
#endif
|
|
unsigned int __state;
|
|
|
|
-#ifdef CONFIG_PREEMPT_RT
|
|
/* saved state for "spinlock sleepers" */
|
|
unsigned int saved_state;
|
|
-#endif
|
|
|
|
/*
|
|
* This begins the randomizable portion of task_struct. Only
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index f7d392d849be56..7b7222b4f6111d 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -2877,6 +2877,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb)
|
|
skb->transport_header = skb->data - skb->head;
|
|
}
|
|
|
|
+/**
|
|
+ * skb_reset_transport_header_careful - conditionally reset transport header
|
|
+ * @skb: buffer to alter
|
|
+ *
|
|
+ * Hardened version of skb_reset_transport_header().
|
|
+ *
|
|
+ * Returns: true if the operation was a success.
|
|
+ */
|
|
+static inline bool __must_check
|
|
+skb_reset_transport_header_careful(struct sk_buff *skb)
|
|
+{
|
|
+ long offset = skb->data - skb->head;
|
|
+
|
|
+ if (unlikely(offset != (typeof(skb->transport_header))offset))
|
|
+ return false;
|
|
+
|
|
+ if (unlikely(offset == (typeof(skb->transport_header))~0U))
|
|
+ return false;
|
|
+
|
|
+ skb->transport_header = offset;
|
|
+ return true;
|
|
+}
|
|
+
|
|
static inline void skb_set_transport_header(struct sk_buff *skb,
|
|
const int offset)
|
|
{
|
|
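skb_reset_transport_header_careful() lets a caller notice when the current data offset would not fit into skb->transport_header instead of silently truncating it. A hedged sketch of a decapsulation-style caller; demo_decap_rcv() and its error handling are illustrative, not taken from the tree.

#include <linux/skbuff.h>

static int demo_decap_rcv(struct sk_buff *skb, unsigned int hdrlen)
{
	if (!pskb_may_pull(skb, hdrlen))
		goto drop;

	__skb_pull(skb, hdrlen);

	/* The unhardened variant would reset unconditionally here. */
	if (!skb_reset_transport_header_careful(skb))
		goto drop;

	return 0;

drop:
	kfree_skb(skb);
	return -EINVAL;
}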
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
|
|
index 0b9f1e598e3a6b..4bc6bb01a0eb8b 100644
|
|
--- a/include/linux/usb/usbnet.h
|
|
+++ b/include/linux/usb/usbnet.h
|
|
@@ -76,6 +76,7 @@ struct usbnet {
|
|
# define EVENT_LINK_CHANGE 11
|
|
# define EVENT_SET_RX_MODE 12
|
|
# define EVENT_NO_IP_ALIGN 13
|
|
+# define EVENT_LINK_CARRIER_ON 14
|
|
/* This one is special, as it indicates that the device is going away
|
|
* there are cyclic dependencies between tasklet, timer and bh
|
|
* that must be broken
|
|
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
|
|
index 7725b7579b7819..2209c227e85920 100644
|
|
--- a/include/linux/wait_bit.h
|
|
+++ b/include/linux/wait_bit.h
|
|
@@ -335,4 +335,64 @@ static inline void clear_and_wake_up_bit(int bit, void *word)
|
|
wake_up_bit(word, bit);
|
|
}
|
|
|
|
+/**
|
|
+ * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
|
|
+ * @bit: the bit of the word being waited on
|
|
+ * @word: the address of memory containing that bit
|
|
+ *
|
|
+ * If the bit is set and can be atomically cleared, any tasks waiting in
|
|
+ * wait_on_bit() or similar will be woken. This call has the same
|
|
+ * complete ordering semantics as test_and_clear_bit(). Any changes to
|
|
+ * memory made before this call are guaranteed to be visible after the
|
|
+ * corresponding wait_on_bit() completes.
|
|
+ *
|
|
+ * Returns %true if the bit was successfully cleared and the wake up was sent.
|
|
+ */
|
|
+static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
|
|
+{
|
|
+ if (!test_and_clear_bit(bit, word))
|
|
+ return false;
|
|
+ /* no extra barrier required */
|
|
+ wake_up_bit(word, bit);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
|
|
+ * @var: the variable to dec and test
|
|
+ *
|
|
+ * Decrements the atomic variable and if it reaches zero, send a wake_up to any
|
|
+ * processes waiting on the variable.
|
|
+ *
|
|
+ * This function has the same complete ordering semantics as atomic_dec_and_test.
|
|
+ *
|
|
+ * Returns %true if the variable reaches zero and the wake up was sent.
|
|
+ */
|
|
+
|
|
+static inline bool atomic_dec_and_wake_up(atomic_t *var)
|
|
+{
|
|
+ if (!atomic_dec_and_test(var))
|
|
+ return false;
|
|
+ /* No extra barrier required */
|
|
+ wake_up_var(var);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * store_release_wake_up - update a variable and send a wake_up
|
|
+ * @var: the address of the variable to be updated and woken
|
|
+ * @val: the value to store in the variable.
|
|
+ *
|
|
+ * Store the given value in the variable and send a wake up to any tasks
|
|
+ * waiting on the variable. All necessary barriers are included to ensure
|
|
+ * the task calling wait_var_event() sees the new value and all values
|
|
+ * written to memory before this call.
|
|
+ */
|
|
+#define store_release_wake_up(var, val) \
|
|
+do { \
|
|
+ smp_store_release(var, val); \
|
|
+ smp_mb(); \
|
|
+ wake_up_var(var); \
|
|
+} while (0)
|
|
+
|
|
#endif /* _LINUX_WAIT_BIT_H */
|
|
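The three helpers added to wait_bit.h bundle a state change with the matching wake-up so the ordering between them cannot be lost. A hedged usage sketch follows; struct demo_ctx, DEMO_BUSY and the demo_* functions are illustrative and assume the usual wait_on_bit()/wait_var_event() waiters on the other side.

#include <linux/wait_bit.h>
#include <linux/atomic.h>
#include <linux/sched.h>

#define DEMO_BUSY	0	/* illustrative bit number in ->flags */

struct demo_ctx {
	unsigned long	flags;	/* DEMO_BUSY lives here */
	atomic_t	users;	/* outstanding references */
	int		state;	/* published via store_release_wake_up() */
};

/* Waiter: sleep until DEMO_BUSY is cleared by demo_finish(). */
static int demo_wait_idle(struct demo_ctx *ctx)
{
	return wait_on_bit(&ctx->flags, DEMO_BUSY, TASK_INTERRUPTIBLE);
}

/* Owner: clear DEMO_BUSY and wake waiters, but only if it was set. */
static void demo_finish(struct demo_ctx *ctx)
{
	test_and_clear_wake_up_bit(DEMO_BUSY, &ctx->flags);
}

/* Drop a reference; the final put wakes wait_var_event(&ctx->users, ...). */
static void demo_put(struct demo_ctx *ctx)
{
	atomic_dec_and_wake_up(&ctx->users);
}

/* Publish a new state for wait_var_event(&ctx->state, ...) sleepers. */
static void demo_set_state(struct demo_ctx *ctx, int val)
{
	store_release_wake_up(&ctx->state, val);
}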
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
|
|
index e4a97b2d099841..4c084a03d6bb75 100644
|
|
--- a/include/net/bluetooth/hci.h
|
|
+++ b/include/net/bluetooth/hci.h
|
|
@@ -2577,6 +2577,7 @@ struct hci_ev_le_conn_complete {
|
|
#define LE_EXT_ADV_DIRECT_IND 0x0004
|
|
#define LE_EXT_ADV_SCAN_RSP 0x0008
|
|
#define LE_EXT_ADV_LEGACY_PDU 0x0010
|
|
+#define LE_EXT_ADV_DATA_STATUS_MASK 0x0060
|
|
#define LE_EXT_ADV_EVT_TYPE_MASK 0x007f
|
|
|
|
#define ADDR_LE_DEV_PUBLIC 0x00
|
|
diff --git a/include/net/dst.h b/include/net/dst.h
|
|
index 16b7b99b5f309c..60fb5d2faf43e4 100644
|
|
--- a/include/net/dst.h
|
|
+++ b/include/net/dst.h
|
|
@@ -464,7 +464,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
|
|
/* Output packet to network from transport. */
|
|
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
- return INDIRECT_CALL_INET(skb_dst(skb)->output,
|
|
+ return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->output),
|
|
ip6_output, ip_output,
|
|
net, sk, skb);
|
|
}
|
|
@@ -474,7 +474,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
|
|
/* Input packet from network to transport. */
|
|
static inline int dst_input(struct sk_buff *skb)
|
|
{
|
|
- return INDIRECT_CALL_INET(skb_dst(skb)->input,
|
|
+ return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->input),
|
|
ip6_input, ip_local_deliver, skb);
|
|
}
|
|
|
|
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
|
|
index 53bd2d02a4f0db..09791f5d9b6ec8 100644
|
|
--- a/include/net/lwtunnel.h
|
|
+++ b/include/net/lwtunnel.h
|
|
@@ -138,12 +138,12 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
|
|
static inline void lwtunnel_set_redirect(struct dst_entry *dst)
|
|
{
|
|
if (lwtunnel_output_redirect(dst->lwtstate)) {
|
|
- dst->lwtstate->orig_output = dst->output;
|
|
- dst->output = lwtunnel_output;
|
|
+ dst->lwtstate->orig_output = READ_ONCE(dst->output);
|
|
+ WRITE_ONCE(dst->output, lwtunnel_output);
|
|
}
|
|
if (lwtunnel_input_redirect(dst->lwtstate)) {
|
|
- dst->lwtstate->orig_input = dst->input;
|
|
- dst->input = lwtunnel_input;
|
|
+ dst->lwtstate->orig_input = READ_ONCE(dst->input);
|
|
+ WRITE_ONCE(dst->input, lwtunnel_input);
|
|
}
|
|
}
|
|
#else
|
|
diff --git a/include/net/tc_act/tc_ctinfo.h b/include/net/tc_act/tc_ctinfo.h
|
|
index f071c1d70a25e1..a04bcac7adf4b6 100644
|
|
--- a/include/net/tc_act/tc_ctinfo.h
|
|
+++ b/include/net/tc_act/tc_ctinfo.h
|
|
@@ -18,9 +18,9 @@ struct tcf_ctinfo_params {
|
|
struct tcf_ctinfo {
|
|
struct tc_action common;
|
|
struct tcf_ctinfo_params __rcu *params;
|
|
- u64 stats_dscp_set;
|
|
- u64 stats_dscp_error;
|
|
- u64 stats_cpmark_set;
|
|
+ atomic64_t stats_dscp_set;
|
|
+ atomic64_t stats_dscp_error;
|
|
+ atomic64_t stats_cpmark_set;
|
|
};
|
|
|
|
enum {
|
|
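Converting the act_ctinfo counters to atomic64_t lets the datapath bump them concurrently without tearing, while the dump path reads a consistent 64-bit snapshot even on 32-bit systems. A minimal sketch of the resulting pattern; the demo_* wrappers are illustrative.

#include <linux/atomic.h>
#include <net/tc_act/tc_ctinfo.h>

/* Datapath side: lockless increment when a DSCP value is rewritten. */
static void demo_count_dscp_set(struct tcf_ctinfo *ci)
{
	atomic64_inc(&ci->stats_dscp_set);
}

/* Dump side: plain snapshot for the netlink reply. */
static u64 demo_snapshot_dscp_set(const struct tcf_ctinfo *ci)
{
	return atomic64_read(&ci->stats_dscp_set);
}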
diff --git a/include/net/udp.h b/include/net/udp.h
|
|
index 488a6d2babccf2..89eeb187667b01 100644
|
|
--- a/include/net/udp.h
|
|
+++ b/include/net/udp.h
|
|
@@ -466,6 +466,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
|
|
{
|
|
netdev_features_t features = NETIF_F_SG;
|
|
struct sk_buff *segs;
|
|
+ int drop_count;
|
|
+
|
|
+ /*
|
|
+	 * Segmentation in the UDP receive path is only for UDP GRO; drop UDP
|
|
+ * fragmentation offload (UFO) packets.
|
|
+ */
|
|
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
|
|
+ drop_count = 1;
|
|
+ goto drop;
|
|
+ }
|
|
|
|
/* Avoid csum recalculation by skb_segment unless userspace explicitly
|
|
* asks for the final checksum values
|
|
@@ -489,16 +499,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
|
|
*/
|
|
segs = __skb_gso_segment(skb, features, false);
|
|
if (IS_ERR_OR_NULL(segs)) {
|
|
- int segs_nr = skb_shinfo(skb)->gso_segs;
|
|
-
|
|
- atomic_add(segs_nr, &sk->sk_drops);
|
|
- SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
|
|
- kfree_skb(skb);
|
|
- return NULL;
|
|
+ drop_count = skb_shinfo(skb)->gso_segs;
|
|
+ goto drop;
|
|
}
|
|
|
|
consume_skb(skb);
|
|
return segs;
|
|
+
|
|
+drop:
|
|
+ atomic_add(drop_count, &sk->sk_drops);
|
|
+ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
|
|
+ kfree_skb(skb);
|
|
+ return NULL;
|
|
}
|
|
|
|
static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
|
|
diff --git a/kernel/audit.h b/kernel/audit.h
|
|
index a60d2840559e2b..5156ecd3545733 100644
|
|
--- a/kernel/audit.h
|
|
+++ b/kernel/audit.h
|
|
@@ -199,7 +199,7 @@ struct audit_context {
|
|
int argc;
|
|
} execve;
|
|
struct {
|
|
- char *name;
|
|
+ const char *name;
|
|
} module;
|
|
struct {
|
|
struct audit_ntp_data ntp_data;
|
|
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
|
|
index 6f0d6fb6523fa7..d48830c7e4beb7 100644
|
|
--- a/kernel/auditsc.c
|
|
+++ b/kernel/auditsc.c
|
|
@@ -2870,7 +2870,7 @@ void __audit_openat2_how(struct open_how *how)
|
|
context->type = AUDIT_OPENAT2;
|
|
}
|
|
|
|
-void __audit_log_kern_module(char *name)
|
|
+void __audit_log_kern_module(const char *name)
|
|
{
|
|
struct audit_context *context = audit_context();
|
|
|
|
diff --git a/kernel/bpf/preload/Kconfig b/kernel/bpf/preload/Kconfig
|
|
index c9d45c9d6918d1..f9b11d01c3b50d 100644
|
|
--- a/kernel/bpf/preload/Kconfig
|
|
+++ b/kernel/bpf/preload/Kconfig
|
|
@@ -10,7 +10,6 @@ menuconfig BPF_PRELOAD
|
|
# The dependency on !COMPILE_TEST prevents it from being enabled
|
|
# in allmodconfig or allyesconfig configurations
|
|
depends on !COMPILE_TEST
|
|
- select USERMODE_DRIVER
|
|
help
|
|
This builds kernel module with several embedded BPF programs that are
|
|
pinned into BPF FS mount point as human readable files that are
|
|
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index 3a33d9c1b1b2b4..b73f5c44113d64 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -6499,11 +6499,21 @@ static void perf_mmap_close(struct vm_area_struct *vma)
|
|
ring_buffer_put(rb); /* could be last */
|
|
}
|
|
|
|
+static int perf_mmap_may_split(struct vm_area_struct *vma, unsigned long addr)
|
|
+{
|
|
+ /*
|
|
+ * Forbid splitting perf mappings to prevent refcount leaks due to
|
|
+ * the resulting non-matching offsets and sizes. See open()/close().
|
|
+ */
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
static const struct vm_operations_struct perf_mmap_vmops = {
|
|
.open = perf_mmap_open,
|
|
.close = perf_mmap_close, /* non mergeable */
|
|
.fault = perf_mmap_fault,
|
|
.page_mkwrite = perf_mmap_fault,
|
|
+ .may_split = perf_mmap_may_split,
|
|
};
|
|
|
|
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
|
|
@@ -6595,9 +6605,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
|
|
goto unlock;
|
|
}
|
|
|
|
- atomic_set(&rb->aux_mmap_count, 1);
|
|
user_extra = nr_pages;
|
|
-
|
|
goto accounting;
|
|
}
|
|
|
|
@@ -6699,8 +6707,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
|
|
} else {
|
|
ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
|
|
event->attr.aux_watermark, flags);
|
|
- if (!ret)
|
|
+ if (!ret) {
|
|
+ atomic_set(&rb->aux_mmap_count, 1);
|
|
rb->aux_mmap_locked = extra;
|
|
+ }
|
|
}
|
|
|
|
unlock:
|
|
@@ -6710,6 +6720,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
|
|
|
|
atomic_inc(&event->mmap_count);
|
|
} else if (rb) {
|
|
+ /* AUX allocation failed */
|
|
atomic_dec(&rb->mmap_count);
|
|
}
|
|
aux_unlock:
|
|
@@ -6717,6 +6728,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
|
|
mutex_unlock(aux_mutex);
|
|
mutex_unlock(&event->mmap_mutex);
|
|
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
/*
|
|
* Since pinned accounting is per vm we cannot allow fork() to copy our
|
|
* vma.
|
|
diff --git a/kernel/freezer.c b/kernel/freezer.c
|
|
index 4fad0e6fca6447..d8db479af478e7 100644
|
|
--- a/kernel/freezer.c
|
|
+++ b/kernel/freezer.c
|
|
@@ -71,7 +71,11 @@ bool __refrigerator(bool check_kthr_stop)
|
|
for (;;) {
|
|
bool freeze;
|
|
|
|
+		raw_spin_lock_irq(&current->pi_lock);
|
|
set_current_state(TASK_FROZEN);
|
|
+ /* unstale saved_state so that __thaw_task() will wake us up */
|
|
+ current->saved_state = TASK_RUNNING;
|
|
+		raw_spin_unlock_irq(&current->pi_lock);
|
|
|
|
spin_lock_irq(&freezer_lock);
|
|
freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop());
|
|
@@ -129,6 +133,7 @@ static int __set_task_frozen(struct task_struct *p, void *arg)
|
|
WARN_ON_ONCE(debug_locks && p->lockdep_depth);
|
|
#endif
|
|
|
|
+ p->saved_state = p->__state;
|
|
WRITE_ONCE(p->__state, TASK_FROZEN);
|
|
return TASK_FROZEN;
|
|
}
|
|
@@ -170,46 +175,30 @@ bool freeze_task(struct task_struct *p)
|
|
}
|
|
|
|
/*
|
|
- * The special task states (TASK_STOPPED, TASK_TRACED) keep their canonical
|
|
- * state in p->jobctl. If either of them got a wakeup that was missed because
|
|
- * TASK_FROZEN, then their canonical state reflects that and the below will
|
|
- * refuse to restore the special state and instead issue the wakeup.
|
|
+ * Restore the saved_state from before the task entered the freezer. For a typical
|
|
+ * task in __refrigerator(), saved_state == TASK_RUNNING so nothing happens
|
|
+ * here. For tasks which were TASK_NORMAL | TASK_FREEZABLE, their initial state
|
|
+ * is restored unless they got an expected wakeup (see ttwu_state_match()).
|
|
+ * Returns 1 if the task state was restored.
|
|
*/
|
|
-static int __set_task_special(struct task_struct *p, void *arg)
|
|
+static int __restore_freezer_state(struct task_struct *p, void *arg)
|
|
{
|
|
- unsigned int state = 0;
|
|
+ unsigned int state = p->saved_state;
|
|
|
|
- if (p->jobctl & JOBCTL_TRACED)
|
|
- state = TASK_TRACED;
|
|
-
|
|
- else if (p->jobctl & JOBCTL_STOPPED)
|
|
- state = TASK_STOPPED;
|
|
-
|
|
- if (state)
|
|
+ if (state != TASK_RUNNING) {
|
|
WRITE_ONCE(p->__state, state);
|
|
+ p->saved_state = TASK_RUNNING;
|
|
+ return 1;
|
|
+ }
|
|
|
|
- return state;
|
|
+ return 0;
|
|
}
|
|
|
|
void __thaw_task(struct task_struct *p)
|
|
{
|
|
- unsigned long flags, flags2;
|
|
-
|
|
- spin_lock_irqsave(&freezer_lock, flags);
|
|
- if (WARN_ON_ONCE(freezing(p)))
|
|
- goto unlock;
|
|
-
|
|
- if (lock_task_sighand(p, &flags2)) {
|
|
- /* TASK_FROZEN -> TASK_{STOPPED,TRACED} */
|
|
- bool ret = task_call_func(p, __set_task_special, NULL);
|
|
- unlock_task_sighand(p, &flags2);
|
|
- if (ret)
|
|
- goto unlock;
|
|
- }
|
|
-
|
|
- wake_up_state(p, TASK_FROZEN);
|
|
-unlock:
|
|
- spin_unlock_irqrestore(&freezer_lock, flags);
|
|
+ guard(spinlock_irqsave)(&freezer_lock);
|
|
+ if (frozen(p) && !task_call_func(p, __restore_freezer_state, NULL))
|
|
+ wake_up_state(p, TASK_FROZEN);
|
|
}
|
|
|
|
/**
|
|
diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
|
|
index 0ddbdab5903dff..9d8c95defdd673 100644
|
|
--- a/kernel/kcsan/kcsan_test.c
|
|
+++ b/kernel/kcsan/kcsan_test.c
|
|
@@ -530,7 +530,7 @@ static void test_barrier_nothreads(struct kunit *test)
|
|
struct kcsan_scoped_access *reorder_access = NULL;
|
|
#endif
|
|
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
|
|
- atomic_t dummy;
|
|
+ atomic_t dummy = ATOMIC_INIT(0);
|
|
|
|
KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
|
|
KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP));
|
|
diff --git a/kernel/module/main.c b/kernel/module/main.c
|
|
index b00e31721a73e3..9711ad14825b24 100644
|
|
--- a/kernel/module/main.c
|
|
+++ b/kernel/module/main.c
|
|
@@ -2876,7 +2876,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
|
|
|
|
module_allocated = true;
|
|
|
|
- audit_log_kern_module(mod->name);
|
|
+ audit_log_kern_module(info->name);
|
|
|
|
/* Reserve our place in the list. */
|
|
err = add_unformed_module(mod);
|
|
@@ -3034,8 +3034,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
|
|
* failures once the proper module was allocated and
|
|
* before that.
|
|
*/
|
|
- if (!module_allocated)
|
|
+ if (!module_allocated) {
|
|
+ audit_log_kern_module(info->name ? info->name : "?");
|
|
mod_stat_bump_becoming(info, flags);
|
|
+ }
|
|
free_copy(info, flags);
|
|
return err;
|
|
}
|
|
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
|
index 760a6c3781cbfc..1b5e4389f788a3 100644
|
|
--- a/kernel/sched/core.c
|
|
+++ b/kernel/sched/core.c
|
|
@@ -2238,31 +2238,26 @@ int __task_state_match(struct task_struct *p, unsigned int state)
|
|
if (READ_ONCE(p->__state) & state)
|
|
return 1;
|
|
|
|
-#ifdef CONFIG_PREEMPT_RT
|
|
if (READ_ONCE(p->saved_state) & state)
|
|
return -1;
|
|
-#endif
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
static __always_inline
|
|
int task_state_match(struct task_struct *p, unsigned int state)
|
|
{
|
|
-#ifdef CONFIG_PREEMPT_RT
|
|
int match;
|
|
|
|
/*
|
|
* Serialize against current_save_and_set_rtlock_wait_state() and
|
|
- * current_restore_rtlock_saved_state().
|
|
+ * current_restore_rtlock_saved_state(), and __refrigerator().
|
|
*/
|
|
raw_spin_lock_irq(&p->pi_lock);
|
|
match = __task_state_match(p, state);
|
|
raw_spin_unlock_irq(&p->pi_lock);
|
|
|
|
return match;
|
|
-#else
|
|
- return __task_state_match(p, state);
|
|
-#endif
|
|
}
|
|
|
|
/*
|
|
@@ -4039,13 +4034,17 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
|
|
* The caller holds p::pi_lock if p != current or has preemption
|
|
* disabled when p == current.
|
|
*
|
|
- * The rules of PREEMPT_RT saved_state:
|
|
+ * The rules of saved_state:
|
|
*
|
|
* The related locking code always holds p::pi_lock when updating
|
|
* p::saved_state, which means the code is fully serialized in both cases.
|
|
*
|
|
- * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
|
|
- * bits set. This allows to distinguish all wakeup scenarios.
|
|
+ * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
|
|
+ * No other bits set. This allows to distinguish all wakeup scenarios.
|
|
+ *
|
|
+ * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
|
|
+ * allows us to prevent early wakeup of tasks before they can be run on
|
|
+ * asymmetric ISA architectures (eg ARMv9).
|
|
*/
|
|
static __always_inline
|
|
bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
|
|
@@ -4059,13 +4058,13 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
|
|
|
|
*success = !!(match = __task_state_match(p, state));
|
|
|
|
-#ifdef CONFIG_PREEMPT_RT
|
|
/*
|
|
* Saved state preserves the task state across blocking on
|
|
- * an RT lock. If the state matches, set p::saved_state to
|
|
- * TASK_RUNNING, but do not wake the task because it waits
|
|
- * for a lock wakeup. Also indicate success because from
|
|
- * the regular waker's point of view this has succeeded.
|
|
+ * an RT lock or TASK_FREEZABLE tasks. If the state matches,
|
|
+ * set p::saved_state to TASK_RUNNING, but do not wake the task
|
|
+ * because it waits for a lock wakeup or __thaw_task(). Also
|
|
+ * indicate success because from the regular waker's point of
|
|
+ * view this has succeeded.
|
|
*
|
|
* After acquiring the lock the task will restore p::__state
|
|
* from p::saved_state which ensures that the regular
|
|
@@ -4075,7 +4074,7 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
|
|
*/
|
|
if (match < 0)
|
|
p->saved_state = TASK_RUNNING;
|
|
-#endif
|
|
+
|
|
return match > 0;
|
|
}
|
|
|
|
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
|
|
index f97e1473389ff1..08a9a9f909d518 100644
|
|
--- a/kernel/sched/psi.c
|
|
+++ b/kernel/sched/psi.c
|
|
@@ -172,17 +172,35 @@ struct psi_group psi_system = {
|
|
.pcpu = &system_group_pcpu,
|
|
};
|
|
|
|
+static DEFINE_PER_CPU(seqcount_t, psi_seq) = SEQCNT_ZERO(psi_seq);
|
|
+
|
|
+static inline void psi_write_begin(int cpu)
|
|
+{
|
|
+ write_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
|
|
+}
|
|
+
|
|
+static inline void psi_write_end(int cpu)
|
|
+{
|
|
+ write_seqcount_end(per_cpu_ptr(&psi_seq, cpu));
|
|
+}
|
|
+
|
|
+static inline u32 psi_read_begin(int cpu)
|
|
+{
|
|
+ return read_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
|
|
+}
|
|
+
|
|
+static inline bool psi_read_retry(int cpu, u32 seq)
|
|
+{
|
|
+ return read_seqcount_retry(per_cpu_ptr(&psi_seq, cpu), seq);
|
|
+}
|
|
+
|
|
static void psi_avgs_work(struct work_struct *work);
|
|
|
|
static void poll_timer_fn(struct timer_list *t);
|
|
|
|
static void group_init(struct psi_group *group)
|
|
{
|
|
- int cpu;
|
|
-
|
|
group->enabled = true;
|
|
- for_each_possible_cpu(cpu)
|
|
- seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
|
|
group->avg_last_update = sched_clock();
|
|
group->avg_next_update = group->avg_last_update + psi_period;
|
|
mutex_init(&group->avgs_lock);
|
|
@@ -258,14 +276,14 @@ static void get_recent_times(struct psi_group *group, int cpu,
|
|
|
|
/* Snapshot a coherent view of the CPU state */
|
|
do {
|
|
- seq = read_seqcount_begin(&groupc->seq);
|
|
+ seq = psi_read_begin(cpu);
|
|
now = cpu_clock(cpu);
|
|
memcpy(times, groupc->times, sizeof(groupc->times));
|
|
state_mask = groupc->state_mask;
|
|
state_start = groupc->state_start;
|
|
if (cpu == current_cpu)
|
|
memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
|
|
- } while (read_seqcount_retry(&groupc->seq, seq));
|
|
+ } while (psi_read_retry(cpu, seq));
|
|
|
|
/* Calculate state time deltas against the previous snapshot */
|
|
for (s = 0; s < NR_PSI_STATES; s++) {
|
|
@@ -775,31 +793,21 @@ static void record_times(struct psi_group_cpu *groupc, u64 now)
|
|
groupc->times[PSI_NONIDLE] += delta;
|
|
}
|
|
|
|
+#define for_each_group(iter, group) \
|
|
+ for (typeof(group) iter = group; iter; iter = iter->parent)
|
|
+
|
|
static void psi_group_change(struct psi_group *group, int cpu,
|
|
unsigned int clear, unsigned int set,
|
|
- bool wake_clock)
|
|
+ u64 now, bool wake_clock)
|
|
{
|
|
struct psi_group_cpu *groupc;
|
|
unsigned int t, m;
|
|
enum psi_states s;
|
|
u32 state_mask;
|
|
- u64 now;
|
|
|
|
lockdep_assert_rq_held(cpu_rq(cpu));
|
|
groupc = per_cpu_ptr(group->pcpu, cpu);
|
|
|
|
- /*
|
|
- * First we update the task counts according to the state
|
|
- * change requested through the @clear and @set bits.
|
|
- *
|
|
- * Then if the cgroup PSI stats accounting enabled, we
|
|
- * assess the aggregate resource states this CPU's tasks
|
|
- * have been in since the last change, and account any
|
|
- * SOME and FULL time these may have resulted in.
|
|
- */
|
|
- write_seqcount_begin(&groupc->seq);
|
|
- now = cpu_clock(cpu);
|
|
-
|
|
/*
|
|
* Start with TSK_ONCPU, which doesn't have a corresponding
|
|
* task count - it's just a boolean flag directly encoded in
|
|
@@ -851,7 +859,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
|
|
|
|
groupc->state_mask = state_mask;
|
|
|
|
- write_seqcount_end(&groupc->seq);
|
|
return;
|
|
}
|
|
|
|
@@ -875,8 +882,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
|
|
|
|
groupc->state_mask = state_mask;
|
|
|
|
- write_seqcount_end(&groupc->seq);
|
|
-
|
|
if (state_mask & group->rtpoll_states)
|
|
psi_schedule_rtpoll_work(group, 1, false);
|
|
|
|
@@ -911,24 +916,29 @@ static void psi_flags_change(struct task_struct *task, int clear, int set)
|
|
void psi_task_change(struct task_struct *task, int clear, int set)
|
|
{
|
|
int cpu = task_cpu(task);
|
|
- struct psi_group *group;
|
|
+ u64 now;
|
|
|
|
if (!task->pid)
|
|
return;
|
|
|
|
psi_flags_change(task, clear, set);
|
|
|
|
- group = task_psi_group(task);
|
|
- do {
|
|
- psi_group_change(group, cpu, clear, set, true);
|
|
- } while ((group = group->parent));
|
|
+ psi_write_begin(cpu);
|
|
+ now = cpu_clock(cpu);
|
|
+ for_each_group(group, task_psi_group(task))
|
|
+ psi_group_change(group, cpu, clear, set, now, true);
|
|
+ psi_write_end(cpu);
|
|
}
|
|
|
|
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
|
|
bool sleep)
|
|
{
|
|
- struct psi_group *group, *common = NULL;
|
|
+ struct psi_group *common = NULL;
|
|
int cpu = task_cpu(prev);
|
|
+ u64 now;
|
|
+
|
|
+ psi_write_begin(cpu);
|
|
+ now = cpu_clock(cpu);
|
|
|
|
if (next->pid) {
|
|
psi_flags_change(next, 0, TSK_ONCPU);
|
|
@@ -937,16 +947,15 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
|
|
* ancestors with @prev, those will already have @prev's
|
|
* TSK_ONCPU bit set, and we can stop the iteration there.
|
|
*/
|
|
- group = task_psi_group(next);
|
|
- do {
|
|
- if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
|
|
- PSI_ONCPU) {
|
|
+ for_each_group(group, task_psi_group(next)) {
|
|
+ struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
|
|
+
|
|
+ if (groupc->state_mask & PSI_ONCPU) {
|
|
common = group;
|
|
break;
|
|
}
|
|
-
|
|
- psi_group_change(group, cpu, 0, TSK_ONCPU, true);
|
|
- } while ((group = group->parent));
|
|
+ psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
|
|
+ }
|
|
}
|
|
|
|
if (prev->pid) {
|
|
@@ -979,12 +988,11 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
|
|
|
|
psi_flags_change(prev, clear, set);
|
|
|
|
- group = task_psi_group(prev);
|
|
- do {
|
|
+ for_each_group(group, task_psi_group(prev)) {
|
|
if (group == common)
|
|
break;
|
|
- psi_group_change(group, cpu, clear, set, wake_clock);
|
|
- } while ((group = group->parent));
|
|
+ psi_group_change(group, cpu, clear, set, now, wake_clock);
|
|
+ }
|
|
|
|
/*
|
|
* TSK_ONCPU is handled up to the common ancestor. If there are
|
|
@@ -994,27 +1002,27 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
|
|
*/
|
|
if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
|
|
clear &= ~TSK_ONCPU;
|
|
- for (; group; group = group->parent)
|
|
- psi_group_change(group, cpu, clear, set, wake_clock);
|
|
+ for_each_group(group, common)
|
|
+ psi_group_change(group, cpu, clear, set, now, wake_clock);
|
|
}
|
|
}
|
|
+ psi_write_end(cpu);
|
|
}
|
|
|
|
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
|
|
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
|
|
{
|
|
int cpu = task_cpu(curr);
|
|
- struct psi_group *group;
|
|
struct psi_group_cpu *groupc;
|
|
s64 delta;
|
|
u64 irq;
|
|
+ u64 now;
|
|
|
|
if (!curr->pid)
|
|
return;
|
|
|
|
lockdep_assert_rq_held(rq);
|
|
- group = task_psi_group(curr);
|
|
- if (prev && task_psi_group(prev) == group)
|
|
+ if (prev && task_psi_group(prev) == task_psi_group(curr))
|
|
return;
|
|
|
|
irq = irq_time_read(cpu);
|
|
@@ -1023,25 +1031,22 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
|
|
return;
|
|
rq->psi_irq_time = irq;
|
|
|
|
- do {
|
|
- u64 now;
|
|
+ psi_write_begin(cpu);
|
|
+ now = cpu_clock(cpu);
|
|
|
|
+ for_each_group(group, task_psi_group(curr)) {
|
|
if (!group->enabled)
|
|
continue;
|
|
|
|
groupc = per_cpu_ptr(group->pcpu, cpu);
|
|
|
|
- write_seqcount_begin(&groupc->seq);
|
|
- now = cpu_clock(cpu);
|
|
-
|
|
record_times(groupc, now);
|
|
groupc->times[PSI_IRQ_FULL] += delta;
|
|
|
|
- write_seqcount_end(&groupc->seq);
|
|
-
|
|
if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
|
|
psi_schedule_rtpoll_work(group, 1, false);
|
|
- } while ((group = group->parent));
|
|
+ }
|
|
+ psi_write_end(cpu);
|
|
}
|
|
#endif
|
|
|
|
@@ -1229,12 +1234,14 @@ void psi_cgroup_restart(struct psi_group *group)
|
|
return;
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
- struct rq *rq = cpu_rq(cpu);
|
|
- struct rq_flags rf;
|
|
+ u64 now;
|
|
|
|
- rq_lock_irq(rq, &rf);
|
|
- psi_group_change(group, cpu, 0, 0, true);
|
|
- rq_unlock_irq(rq, &rf);
|
|
+ guard(rq_lock_irq)(cpu_rq(cpu));
|
|
+
|
|
+ psi_write_begin(cpu);
|
|
+ now = cpu_clock(cpu);
|
|
+ psi_group_change(group, cpu, 0, 0, now, true);
|
|
+ psi_write_end(cpu);
|
|
}
|
|
}
|
|
#endif /* CONFIG_CGROUPS */
|
|
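The psi rework above drops the per-group-per-CPU seqcount in favour of one per-CPU seqcount that brackets the whole walk up the group hierarchy, so a reader only retries against a single sequence per CPU. A self-contained sketch of that per-CPU seqcount pattern; the demo_* names are illustrative, not psi code.

#include <linux/seqlock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(seqcount_t, demo_seq) = SEQCNT_ZERO(demo_seq);
static DEFINE_PER_CPU(u64, demo_counter);

/* Writer: runs on @cpu with preemption disabled (psi holds the rq lock). */
static void demo_update(int cpu, u64 delta)
{
	write_seqcount_begin(per_cpu_ptr(&demo_seq, cpu));
	*per_cpu_ptr(&demo_counter, cpu) += delta;
	write_seqcount_end(per_cpu_ptr(&demo_seq, cpu));
}

/* Reader: retry until the snapshot was not torn by a concurrent writer. */
static u64 demo_read(int cpu)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(per_cpu_ptr(&demo_seq, cpu));
		val = *per_cpu_ptr(&demo_counter, cpu);
	} while (read_seqcount_retry(per_cpu_ptr(&demo_seq, cpu), seq));

	return val;
}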
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
|
|
index cb0871fbdb07f0..8af92dbe98f07b 100644
|
|
--- a/kernel/trace/preemptirq_delay_test.c
|
|
+++ b/kernel/trace/preemptirq_delay_test.c
|
|
@@ -119,12 +119,15 @@ static int preemptirq_delay_run(void *data)
|
|
{
|
|
int i;
|
|
int s = MIN(burst_size, NR_TEST_FUNCS);
|
|
- struct cpumask cpu_mask;
|
|
+ cpumask_var_t cpu_mask;
|
|
+
|
|
+ if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
|
|
+ return -ENOMEM;
|
|
|
|
if (cpu_affinity > -1) {
|
|
- cpumask_clear(&cpu_mask);
|
|
- cpumask_set_cpu(cpu_affinity, &cpu_mask);
|
|
- if (set_cpus_allowed_ptr(current, &cpu_mask))
|
|
+ cpumask_clear(cpu_mask);
|
|
+ cpumask_set_cpu(cpu_affinity, cpu_mask);
|
|
+ if (set_cpus_allowed_ptr(current, cpu_mask))
|
|
pr_err("cpu_affinity:%d, failed\n", cpu_affinity);
|
|
}
|
|
|
|
@@ -141,6 +144,8 @@ static int preemptirq_delay_run(void *data)
|
|
|
|
__set_current_state(TASK_RUNNING);
|
|
|
|
+ free_cpumask_var(cpu_mask);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
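The preemptirq_delay_test fix above moves the cpumask off the kthread stack, since struct cpumask can be large when NR_CPUS is big. A standalone sketch of the alloc/use/free pattern for cpumask_var_t; demo_pin_to_cpu() is illustrative.

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/slab.h>

static int demo_pin_to_cpu(int cpu)
{
	cpumask_var_t mask;
	int ret = 0;

	/* Allocates only when CONFIG_CPUMASK_OFFSTACK=y; otherwise on-stack. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	if (set_cpus_allowed_ptr(current, mask))
		ret = -EINVAL;

	free_cpumask_var(mask);
	return ret;
}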
diff --git a/kernel/ucount.c b/kernel/ucount.c
|
|
index 3456018730b6c8..a7fd89693bd2a1 100644
|
|
--- a/kernel/ucount.c
|
|
+++ b/kernel/ucount.c
|
|
@@ -213,7 +213,7 @@ void put_ucounts(struct ucounts *ucounts)
|
|
}
|
|
}
|
|
|
|
-static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
|
|
+static inline bool atomic_long_inc_below(atomic_long_t *v, long u)
|
|
{
|
|
long c, old;
|
|
c = atomic_long_read(v);
|
|
diff --git a/mm/hmm.c b/mm/hmm.c
|
|
index 277ddcab4947d9..a36e1b4046dbd6 100644
|
|
--- a/mm/hmm.c
|
|
+++ b/mm/hmm.c
|
|
@@ -173,6 +173,7 @@ static inline unsigned long hmm_pfn_flags_order(unsigned long order)
|
|
return order << HMM_PFN_ORDER_SHIFT;
|
|
}
|
|
|
|
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
|
|
pmd_t pmd)
|
|
{
|
|
@@ -183,7 +184,6 @@ static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
|
|
hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
|
|
}
|
|
|
|
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
|
|
unsigned long end, unsigned long hmm_pfns[],
|
|
pmd_t pmd)
|
|
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
|
|
index 8516ba62c54559..3b22ce3aa95bb5 100644
|
|
--- a/net/bluetooth/hci_event.c
|
|
+++ b/net/bluetooth/hci_event.c
|
|
@@ -6216,6 +6216,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
|
|
|
|
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
|
|
{
|
|
+ u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;
|
|
+
|
|
+ if (!pdu_type)
|
|
+ return LE_ADV_NONCONN_IND;
|
|
+
|
|
if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
|
|
switch (evt_type) {
|
|
case LE_LEGACY_ADV_IND:
|
|
@@ -6247,8 +6252,7 @@ static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
|
|
if (evt_type & LE_EXT_ADV_SCAN_IND)
|
|
return LE_ADV_SCAN_IND;
|
|
|
|
- if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
|
|
- evt_type & LE_EXT_ADV_DIRECT_IND)
|
|
+ if (evt_type & LE_EXT_ADV_DIRECT_IND)
|
|
return LE_ADV_NONCONN_IND;
|
|
|
|
invalid:
|
|
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
|
|
index 8480684f276251..10eeace2278b72 100644
|
|
--- a/net/caif/cfctrl.c
|
|
+++ b/net/caif/cfctrl.c
|
|
@@ -351,17 +351,154 @@ int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
|
|
return found;
|
|
}
|
|
|
|
+static int cfctrl_link_setup(struct cfctrl *cfctrl, struct cfpkt *pkt, u8 cmdrsp)
|
|
+{
|
|
+ u8 len;
|
|
+ u8 linkid = 0;
|
|
+ enum cfctrl_srv serv;
|
|
+ enum cfctrl_srv servtype;
|
|
+ u8 endpoint;
|
|
+ u8 physlinkid;
|
|
+ u8 prio;
|
|
+ u8 tmp;
|
|
+ u8 *cp;
|
|
+ int i;
|
|
+ struct cfctrl_link_param linkparam;
|
|
+ struct cfctrl_request_info rsp, *req;
|
|
+
|
|
+ memset(&linkparam, 0, sizeof(linkparam));
|
|
+
|
|
+ tmp = cfpkt_extr_head_u8(pkt);
|
|
+
|
|
+ serv = tmp & CFCTRL_SRV_MASK;
|
|
+ linkparam.linktype = serv;
|
|
+
|
|
+ servtype = tmp >> 4;
|
|
+ linkparam.chtype = servtype;
|
|
+
|
|
+ tmp = cfpkt_extr_head_u8(pkt);
|
|
+ physlinkid = tmp & 0x07;
|
|
+ prio = tmp >> 3;
|
|
+
|
|
+ linkparam.priority = prio;
|
|
+ linkparam.phyid = physlinkid;
|
|
+ endpoint = cfpkt_extr_head_u8(pkt);
|
|
+ linkparam.endpoint = endpoint & 0x03;
|
|
+
|
|
+ switch (serv) {
|
|
+ case CFCTRL_SRV_VEI:
|
|
+ case CFCTRL_SRV_DBG:
|
|
+ if (CFCTRL_ERR_BIT & cmdrsp)
|
|
+ break;
|
|
+ /* Link ID */
|
|
+ linkid = cfpkt_extr_head_u8(pkt);
|
|
+ break;
|
|
+ case CFCTRL_SRV_VIDEO:
|
|
+ tmp = cfpkt_extr_head_u8(pkt);
|
|
+ linkparam.u.video.connid = tmp;
|
|
+ if (CFCTRL_ERR_BIT & cmdrsp)
|
|
+ break;
|
|
+ /* Link ID */
|
|
+ linkid = cfpkt_extr_head_u8(pkt);
|
|
+ break;
|
|
+
|
|
+ case CFCTRL_SRV_DATAGRAM:
|
|
+ linkparam.u.datagram.connid = cfpkt_extr_head_u32(pkt);
|
|
+ if (CFCTRL_ERR_BIT & cmdrsp)
|
|
+ break;
|
|
+ /* Link ID */
|
|
+ linkid = cfpkt_extr_head_u8(pkt);
|
|
+ break;
|
|
+ case CFCTRL_SRV_RFM:
|
|
+ /* Construct a frame, convert
|
|
+ * DatagramConnectionID
|
|
+ * to network format long and copy it out...
|
|
+ */
|
|
+ linkparam.u.rfm.connid = cfpkt_extr_head_u32(pkt);
|
|
+ cp = (u8 *) linkparam.u.rfm.volume;
|
|
+ for (tmp = cfpkt_extr_head_u8(pkt);
|
|
+ cfpkt_more(pkt) && tmp != '\0';
|
|
+ tmp = cfpkt_extr_head_u8(pkt))
|
|
+ *cp++ = tmp;
|
|
+ *cp = '\0';
|
|
+
|
|
+ if (CFCTRL_ERR_BIT & cmdrsp)
|
|
+ break;
|
|
+ /* Link ID */
|
|
+ linkid = cfpkt_extr_head_u8(pkt);
|
|
+
|
|
+ break;
|
|
+ case CFCTRL_SRV_UTIL:
|
|
+ /* Construct a frame, convert
|
|
+ * DatagramConnectionID
|
|
+ * to network format long and copy it out...
|
|
+ */
|
|
+ /* Fifosize KB */
|
|
+ linkparam.u.utility.fifosize_kb = cfpkt_extr_head_u16(pkt);
|
|
+ /* Fifosize bufs */
|
|
+ linkparam.u.utility.fifosize_bufs = cfpkt_extr_head_u16(pkt);
|
|
+ /* name */
|
|
+ cp = (u8 *) linkparam.u.utility.name;
|
|
+ caif_assert(sizeof(linkparam.u.utility.name)
|
|
+ >= UTILITY_NAME_LENGTH);
|
|
+ for (i = 0; i < UTILITY_NAME_LENGTH && cfpkt_more(pkt); i++) {
|
|
+ tmp = cfpkt_extr_head_u8(pkt);
|
|
+ *cp++ = tmp;
|
|
+ }
|
|
+ /* Length */
|
|
+ len = cfpkt_extr_head_u8(pkt);
|
|
+ linkparam.u.utility.paramlen = len;
|
|
+ /* Param Data */
|
|
+ cp = linkparam.u.utility.params;
|
|
+ while (cfpkt_more(pkt) && len--) {
|
|
+ tmp = cfpkt_extr_head_u8(pkt);
|
|
+ *cp++ = tmp;
|
|
+ }
|
|
+ if (CFCTRL_ERR_BIT & cmdrsp)
|
|
+ break;
|
|
+ /* Link ID */
|
|
+ linkid = cfpkt_extr_head_u8(pkt);
|
|
+ /* Length */
|
|
+ len = cfpkt_extr_head_u8(pkt);
|
|
+ /* Param Data */
|
|
+ cfpkt_extr_head(pkt, NULL, len);
|
|
+ break;
|
|
+ default:
|
|
+ pr_warn("Request setup, invalid type (%d)\n", serv);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ rsp.cmd = CFCTRL_CMD_LINK_SETUP;
|
|
+ rsp.param = linkparam;
|
|
+ spin_lock_bh(&cfctrl->info_list_lock);
|
|
+ req = cfctrl_remove_req(cfctrl, &rsp);
|
|
+
|
|
+ if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
|
|
+ cfpkt_erroneous(pkt)) {
|
|
+ pr_err("Invalid O/E bit or parse error "
|
|
+ "on CAIF control channel\n");
|
|
+ cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 0,
|
|
+ req ? req->client_layer : NULL);
|
|
+ } else {
|
|
+ cfctrl->res.linksetup_rsp(cfctrl->serv.layer.up, linkid,
|
|
+ serv, physlinkid,
|
|
+ req ? req->client_layer : NULL);
|
|
+ }
|
|
+
|
|
+ kfree(req);
|
|
+
|
|
+ spin_unlock_bh(&cfctrl->info_list_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
|
|
{
|
|
u8 cmdrsp;
|
|
u8 cmd;
|
|
- int ret = -1;
|
|
- u8 len;
|
|
- u8 param[255];
|
|
+ int ret = 0;
|
|
u8 linkid = 0;
|
|
struct cfctrl *cfctrl = container_obj(layer);
|
|
- struct cfctrl_request_info rsp, *req;
|
|
-
|
|
|
|
cmdrsp = cfpkt_extr_head_u8(pkt);
|
|
cmd = cmdrsp & CFCTRL_CMD_MASK;
|
|
@@ -374,150 +511,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
|
|
|
|
switch (cmd) {
|
|
case CFCTRL_CMD_LINK_SETUP:
|
|
- {
|
|
- enum cfctrl_srv serv;
|
|
- enum cfctrl_srv servtype;
|
|
- u8 endpoint;
|
|
- u8 physlinkid;
|
|
- u8 prio;
|
|
- u8 tmp;
|
|
- u8 *cp;
|
|
- int i;
|
|
- struct cfctrl_link_param linkparam;
|
|
- memset(&linkparam, 0, sizeof(linkparam));
|
|
-
|
|
- tmp = cfpkt_extr_head_u8(pkt);
|
|
-
|
|
- serv = tmp & CFCTRL_SRV_MASK;
|
|
- linkparam.linktype = serv;
|
|
-
|
|
- servtype = tmp >> 4;
|
|
- linkparam.chtype = servtype;
|
|
-
|
|
- tmp = cfpkt_extr_head_u8(pkt);
|
|
- physlinkid = tmp & 0x07;
|
|
- prio = tmp >> 3;
|
|
-
|
|
- linkparam.priority = prio;
|
|
- linkparam.phyid = physlinkid;
|
|
- endpoint = cfpkt_extr_head_u8(pkt);
|
|
- linkparam.endpoint = endpoint & 0x03;
|
|
-
|
|
- switch (serv) {
|
|
- case CFCTRL_SRV_VEI:
|
|
- case CFCTRL_SRV_DBG:
|
|
- if (CFCTRL_ERR_BIT & cmdrsp)
|
|
- break;
|
|
- /* Link ID */
|
|
- linkid = cfpkt_extr_head_u8(pkt);
|
|
- break;
|
|
- case CFCTRL_SRV_VIDEO:
|
|
- tmp = cfpkt_extr_head_u8(pkt);
|
|
- linkparam.u.video.connid = tmp;
|
|
- if (CFCTRL_ERR_BIT & cmdrsp)
|
|
- break;
|
|
- /* Link ID */
|
|
- linkid = cfpkt_extr_head_u8(pkt);
|
|
- break;
|
|
-
|
|
- case CFCTRL_SRV_DATAGRAM:
|
|
- linkparam.u.datagram.connid =
|
|
- cfpkt_extr_head_u32(pkt);
|
|
- if (CFCTRL_ERR_BIT & cmdrsp)
|
|
- break;
|
|
- /* Link ID */
|
|
- linkid = cfpkt_extr_head_u8(pkt);
|
|
- break;
|
|
- case CFCTRL_SRV_RFM:
|
|
- /* Construct a frame, convert
|
|
- * DatagramConnectionID
|
|
- * to network format long and copy it out...
|
|
- */
|
|
- linkparam.u.rfm.connid =
|
|
- cfpkt_extr_head_u32(pkt);
|
|
- cp = (u8 *) linkparam.u.rfm.volume;
|
|
- for (tmp = cfpkt_extr_head_u8(pkt);
|
|
- cfpkt_more(pkt) && tmp != '\0';
|
|
- tmp = cfpkt_extr_head_u8(pkt))
|
|
- *cp++ = tmp;
|
|
- *cp = '\0';
|
|
-
|
|
- if (CFCTRL_ERR_BIT & cmdrsp)
|
|
- break;
|
|
- /* Link ID */
|
|
- linkid = cfpkt_extr_head_u8(pkt);
|
|
-
|
|
- break;
|
|
- case CFCTRL_SRV_UTIL:
|
|
- /* Construct a frame, convert
|
|
- * DatagramConnectionID
|
|
- * to network format long and copy it out...
|
|
- */
|
|
- /* Fifosize KB */
|
|
- linkparam.u.utility.fifosize_kb =
|
|
- cfpkt_extr_head_u16(pkt);
|
|
- /* Fifosize bufs */
|
|
- linkparam.u.utility.fifosize_bufs =
|
|
- cfpkt_extr_head_u16(pkt);
|
|
- /* name */
|
|
- cp = (u8 *) linkparam.u.utility.name;
|
|
- caif_assert(sizeof(linkparam.u.utility.name)
|
|
- >= UTILITY_NAME_LENGTH);
|
|
- for (i = 0;
|
|
- i < UTILITY_NAME_LENGTH
|
|
- && cfpkt_more(pkt); i++) {
|
|
- tmp = cfpkt_extr_head_u8(pkt);
|
|
- *cp++ = tmp;
|
|
- }
|
|
- /* Length */
|
|
- len = cfpkt_extr_head_u8(pkt);
|
|
- linkparam.u.utility.paramlen = len;
|
|
- /* Param Data */
|
|
- cp = linkparam.u.utility.params;
|
|
- while (cfpkt_more(pkt) && len--) {
|
|
- tmp = cfpkt_extr_head_u8(pkt);
|
|
- *cp++ = tmp;
|
|
- }
|
|
- if (CFCTRL_ERR_BIT & cmdrsp)
|
|
- break;
|
|
- /* Link ID */
|
|
- linkid = cfpkt_extr_head_u8(pkt);
|
|
- /* Length */
|
|
- len = cfpkt_extr_head_u8(pkt);
|
|
- /* Param Data */
|
|
-			cfpkt_extr_head(pkt, &param, len);
|
|
- break;
|
|
- default:
|
|
- pr_warn("Request setup, invalid type (%d)\n",
|
|
- serv);
|
|
- goto error;
|
|
- }
|
|
-
|
|
- rsp.cmd = cmd;
|
|
- rsp.param = linkparam;
|
|
- spin_lock_bh(&cfctrl->info_list_lock);
|
|
- req = cfctrl_remove_req(cfctrl, &rsp);
|
|
-
|
|
- if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
|
|
- cfpkt_erroneous(pkt)) {
|
|
- pr_err("Invalid O/E bit or parse error "
|
|
- "on CAIF control channel\n");
|
|
- cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
|
|
- 0,
|
|
- req ? req->client_layer
|
|
- : NULL);
|
|
- } else {
|
|
- cfctrl->res.linksetup_rsp(cfctrl->serv.
|
|
- layer.up, linkid,
|
|
- serv, physlinkid,
|
|
- req ? req->
|
|
- client_layer : NULL);
|
|
- }
|
|
-
|
|
- kfree(req);
|
|
-
|
|
- spin_unlock_bh(&cfctrl->info_list_lock);
|
|
- }
|
|
+ ret = cfctrl_link_setup(cfctrl, pkt, cmdrsp);
|
|
break;
|
|
case CFCTRL_CMD_LINK_DESTROY:
|
|
linkid = cfpkt_extr_head_u8(pkt);
|
|
@@ -544,9 +538,9 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
|
|
break;
|
|
default:
|
|
pr_err("Unrecognized Control Frame\n");
|
|
+ ret = -1;
|
|
goto error;
|
|
}
|
|
- ret = 0;
|
|
error:
|
|
cfpkt_destroy(pkt);
|
|
return ret;
|
|
diff --git a/net/core/dst.c b/net/core/dst.c
|
|
index aad197e761cb4c..2513665696f693 100644
|
|
--- a/net/core/dst.c
|
|
+++ b/net/core/dst.c
|
|
@@ -150,8 +150,8 @@ void dst_dev_put(struct dst_entry *dst)
|
|
dst->obsolete = DST_OBSOLETE_DEAD;
|
|
if (dst->ops->ifdown)
|
|
dst->ops->ifdown(dst, dev);
|
|
- dst->input = dst_discard;
|
|
- dst->output = dst_discard_out;
|
|
+ WRITE_ONCE(dst->input, dst_discard);
|
|
+ WRITE_ONCE(dst->output, dst_discard_out);
|
|
dst->dev = blackhole_netdev;
|
|
netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
|
|
GFP_ATOMIC);
|
|
diff --git a/net/core/filter.c b/net/core/filter.c
|
|
index 3e10b4c8338f92..7afb7658c38835 100644
|
|
--- a/net/core/filter.c
|
|
+++ b/net/core/filter.c
|
|
@@ -9330,6 +9330,9 @@ static bool flow_dissector_is_valid_access(int off, int size,
|
|
if (off < 0 || off >= sizeof(struct __sk_buff))
|
|
return false;
|
|
|
|
+ if (off % size != 0)
|
|
+ return false;
|
|
+
|
|
if (type == BPF_WRITE)
|
|
return false;
|
|
|
|
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
|
|
index 1a4d2a61b060bd..2bdb1e84c6c8a8 100644
|
|
--- a/net/core/netpoll.c
|
|
+++ b/net/core/netpoll.c
|
|
@@ -791,6 +791,13 @@ int netpoll_setup(struct netpoll *np)
|
|
if (err)
|
|
goto put;
|
|
rtnl_unlock();
|
|
+
|
|
+ /* Make sure all NAPI polls which started before dev->npinfo
|
|
+ * was visible have exited before we start calling NAPI poll.
|
|
+ * NAPI skips locking if dev->npinfo is NULL.
|
|
+ */
|
|
+ synchronize_rcu();
|
|
+
|
|
return 0;
|
|
|
|
put:
|
|
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
|
|
index c3169e1e635248..6225547808a6ba 100644
|
|
--- a/net/core/skmsg.c
|
|
+++ b/net/core/skmsg.c
|
|
@@ -655,6 +655,13 @@ static void sk_psock_backlog(struct work_struct *work)
|
|
bool ingress;
|
|
int ret;
|
|
|
|
+ /* If sk is quickly removed from the map and then added back, the old
|
|
+ * psock should not be scheduled, because there are now two psocks
|
|
+ * pointing to the same sk.
|
|
+ */
|
|
+ if (!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
|
|
+ return;
|
|
+
|
|
/* Increment the psock refcnt to synchronize with close(fd) path in
|
|
* sock_map_close(), ensuring we wait for backlog thread completion
|
|
* before sk_socket freed. If refcnt increment fails, it indicates
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index 8ee1ad2d8c13f6..6ee77f7f911473 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -1699,8 +1699,8 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
|
|
else if (rt->rt_gw_family == AF_INET6)
|
|
new_rt->rt_gw6 = rt->rt_gw6;
|
|
|
|
- new_rt->dst.input = rt->dst.input;
|
|
- new_rt->dst.output = rt->dst.output;
|
|
+ new_rt->dst.input = READ_ONCE(rt->dst.input);
|
|
+ new_rt->dst.output = READ_ONCE(rt->dst.output);
|
|
new_rt->dst.error = rt->dst.error;
|
|
new_rt->dst.lastuse = jiffies;
|
|
new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
|
|
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
|
|
index f7b95bc8ad60bb..c6d00817ad3fd9 100644
|
|
--- a/net/ipv4/tcp_input.c
|
|
+++ b/net/ipv4/tcp_input.c
|
|
@@ -4872,8 +4872,9 @@ static void tcp_ofo_queue(struct sock *sk)
|
|
|
|
if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
|
|
__u32 dsack = dsack_high;
|
|
+
|
|
if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
|
|
- dsack_high = TCP_SKB_CB(skb)->end_seq;
|
|
+ dsack = TCP_SKB_CB(skb)->end_seq;
|
|
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
|
|
}
|
|
p = rb_next(p);
|
|
@@ -4940,6 +4941,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
|
return;
|
|
}
|
|
|
|
+ tcp_measure_rcv_mss(sk, skb);
|
|
/* Disable header prediction. */
|
|
tp->pred_flags = 0;
|
|
inet_csk_schedule_ack(sk);
|
|
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
|
|
index 023ac39041a214..c44136cbbaa1f4 100644
|
|
--- a/net/ipv6/ip6_fib.c
|
|
+++ b/net/ipv6/ip6_fib.c
|
|
@@ -437,15 +437,17 @@ struct fib6_dump_arg {
|
|
static int fib6_rt_dump(struct fib6_info *rt, struct fib6_dump_arg *arg)
|
|
{
|
|
enum fib_event_type fib_event = FIB_EVENT_ENTRY_REPLACE;
|
|
+ unsigned int nsiblings;
|
|
int err;
|
|
|
|
if (!rt || rt == arg->net->ipv6.fib6_null_entry)
|
|
return 0;
|
|
|
|
- if (rt->fib6_nsiblings)
|
|
+ nsiblings = READ_ONCE(rt->fib6_nsiblings);
|
|
+ if (nsiblings)
|
|
err = call_fib6_multipath_entry_notifier(arg->nb, fib_event,
|
|
rt,
|
|
- rt->fib6_nsiblings,
|
|
+ nsiblings,
|
|
arg->extack);
|
|
else
|
|
err = call_fib6_entry_notifier(arg->nb, fib_event, rt,
|
|
@@ -1118,7 +1120,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
|
|
|
|
if (rt6_duplicate_nexthop(iter, rt)) {
|
|
if (rt->fib6_nsiblings)
|
|
- rt->fib6_nsiblings = 0;
|
|
+ WRITE_ONCE(rt->fib6_nsiblings, 0);
|
|
if (!(iter->fib6_flags & RTF_EXPIRES))
|
|
return -EEXIST;
|
|
if (!(rt->fib6_flags & RTF_EXPIRES))
|
|
@@ -1144,7 +1146,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
|
|
*/
|
|
if (rt_can_ecmp &&
|
|
rt6_qualify_for_ecmp(iter))
|
|
- rt->fib6_nsiblings++;
|
|
+ WRITE_ONCE(rt->fib6_nsiblings,
|
|
+ rt->fib6_nsiblings + 1);
|
|
}
|
|
|
|
if (iter->fib6_metric > rt->fib6_metric)
|
|
@@ -1194,7 +1197,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
|
|
fib6_nsiblings = 0;
|
|
list_for_each_entry_safe(sibling, temp_sibling,
|
|
&rt->fib6_siblings, fib6_siblings) {
|
|
- sibling->fib6_nsiblings++;
|
|
+ WRITE_ONCE(sibling->fib6_nsiblings,
|
|
+ sibling->fib6_nsiblings + 1);
|
|
BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
|
|
fib6_nsiblings++;
|
|
}
|
|
@@ -1239,8 +1243,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
|
|
list_for_each_entry_safe(sibling, next_sibling,
|
|
&rt->fib6_siblings,
|
|
fib6_siblings)
|
|
- sibling->fib6_nsiblings--;
|
|
- rt->fib6_nsiblings = 0;
|
|
+ WRITE_ONCE(sibling->fib6_nsiblings,
|
|
+ sibling->fib6_nsiblings - 1);
|
|
+ WRITE_ONCE(rt->fib6_nsiblings, 0);
|
|
list_del_rcu(&rt->fib6_siblings);
|
|
rt6_multipath_rebalance(next_sibling);
|
|
return err;
|
|
@@ -1952,8 +1957,9 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
|
|
notify_del = true;
|
|
list_for_each_entry_safe(sibling, next_sibling,
|
|
&rt->fib6_siblings, fib6_siblings)
|
|
- sibling->fib6_nsiblings--;
|
|
- rt->fib6_nsiblings = 0;
|
|
+ WRITE_ONCE(sibling->fib6_nsiblings,
|
|
+ sibling->fib6_nsiblings - 1);
|
|
+ WRITE_ONCE(rt->fib6_nsiblings, 0);
|
|
list_del_rcu(&rt->fib6_siblings);
|
|
rt6_multipath_rebalance(next_sibling);
|
|
}
|
|
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
|
|
index 7f014a8969fb25..84b17eeaa57cc7 100644
|
|
--- a/net/ipv6/ip6_offload.c
|
|
+++ b/net/ipv6/ip6_offload.c
|
|
@@ -150,7 +150,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
|
|
|
|
ops = rcu_dereference(inet6_offloads[proto]);
|
|
if (likely(ops && ops->callbacks.gso_segment)) {
|
|
- skb_reset_transport_header(skb);
|
|
+ if (!skb_reset_transport_header_careful(skb))
|
|
+ goto out;
|
|
+
|
|
segs = ops->callbacks.gso_segment(skb, features);
|
|
if (!segs)
|
|
skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
|
|
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
|
|
index 7f19868d7d6c6b..18451dfeae6822 100644
|
|
--- a/net/ipv6/ip6mr.c
|
|
+++ b/net/ipv6/ip6mr.c
|
|
@@ -2035,6 +2035,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
|
|
struct sk_buff *skb, int vifi)
|
|
{
|
|
struct vif_device *vif = &mrt->vif_table[vifi];
|
|
+ struct net_device *indev = skb->dev;
|
|
struct net_device *vif_dev;
|
|
struct ipv6hdr *ipv6h;
|
|
struct dst_entry *dst;
|
|
@@ -2097,7 +2098,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
|
|
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
|
|
|
|
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
|
|
- net, NULL, skb, skb->dev, vif_dev,
|
|
+ net, NULL, skb, indev, skb->dev,
|
|
ip6mr_forward2_finish);
|
|
|
|
out_free:
|
|
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
|
|
index 53197087353a7b..eb9e505f71f979 100644
|
|
--- a/net/ipv6/route.c
|
|
+++ b/net/ipv6/route.c
|
|
@@ -5230,7 +5230,8 @@ static void ip6_route_mpath_notify(struct fib6_info *rt,
|
|
*/
|
|
rcu_read_lock();
|
|
|
|
- if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
|
|
+ if ((nlflags & NLM_F_APPEND) && rt_last &&
|
|
+ READ_ONCE(rt_last->fib6_nsiblings)) {
|
|
rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
|
|
struct fib6_info,
|
|
fib6_siblings);
|
|
@@ -5577,32 +5578,34 @@ static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
|
|
|
|
static size_t rt6_nlmsg_size(struct fib6_info *f6i)
|
|
{
|
|
+ struct fib6_info *sibling;
|
|
+ struct fib6_nh *nh;
|
|
int nexthop_len;
|
|
|
|
if (f6i->nh) {
|
|
nexthop_len = nla_total_size(4); /* RTA_NH_ID */
|
|
nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
|
|
&nexthop_len);
|
|
- } else {
|
|
- struct fib6_nh *nh = f6i->fib6_nh;
|
|
- struct fib6_info *sibling;
|
|
-
|
|
- nexthop_len = 0;
|
|
- if (f6i->fib6_nsiblings) {
|
|
- rt6_nh_nlmsg_size(nh, &nexthop_len);
|
|
-
|
|
- rcu_read_lock();
|
|
+ goto common;
|
|
+ }
|
|
|
|
- list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
|
|
- fib6_siblings) {
|
|
- rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
|
|
- }
|
|
+ rcu_read_lock();
|
|
+retry:
|
|
+ nh = f6i->fib6_nh;
|
|
+ nexthop_len = 0;
|
|
+ if (READ_ONCE(f6i->fib6_nsiblings)) {
|
|
+ rt6_nh_nlmsg_size(nh, &nexthop_len);
|
|
|
|
- rcu_read_unlock();
|
|
+ list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
|
|
+ fib6_siblings) {
|
|
+ rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
|
|
+ if (!READ_ONCE(f6i->fib6_nsiblings))
|
|
+ goto retry;
|
|
}
|
|
- nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
|
|
}
|
|
-
|
|
+ rcu_read_unlock();
|
|
+ nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
|
|
+common:
|
|
return NLMSG_ALIGN(sizeof(struct rtmsg))
|
|
+ nla_total_size(16) /* RTA_SRC */
|
|
+ nla_total_size(16) /* RTA_DST */
|
|
@@ -5761,7 +5764,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
|
|
if (dst->lwtstate &&
|
|
lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
|
|
goto nla_put_failure;
|
|
- } else if (rt->fib6_nsiblings) {
|
|
+ } else if (READ_ONCE(rt->fib6_nsiblings)) {
|
|
struct fib6_info *sibling;
|
|
struct nlattr *mp;
|
|
|
|
@@ -5863,16 +5866,21 @@ static bool fib6_info_uses_dev(const struct fib6_info *f6i,
|
|
if (f6i->fib6_nh->fib_nh_dev == dev)
|
|
return true;
|
|
|
|
- if (f6i->fib6_nsiblings) {
|
|
- struct fib6_info *sibling, *next_sibling;
|
|
+ if (READ_ONCE(f6i->fib6_nsiblings)) {
|
|
+ const struct fib6_info *sibling;
|
|
|
|
- list_for_each_entry_safe(sibling, next_sibling,
|
|
- &f6i->fib6_siblings, fib6_siblings) {
|
|
- if (sibling->fib6_nh->fib_nh_dev == dev)
|
|
+ rcu_read_lock();
|
|
+ list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
|
|
+ fib6_siblings) {
|
|
+ if (sibling->fib6_nh->fib_nh_dev == dev) {
|
|
+ rcu_read_unlock();
|
|
return true;
|
|
+ }
|
|
+ if (!READ_ONCE(f6i->fib6_nsiblings))
|
|
+ break;
|
|
}
|
|
+ rcu_read_unlock();
|
|
}
|
|
-
|
|
return false;
|
|
}
|
|
|
|
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
|
|
index a3c5d4d995db02..3ff7f38394a6bc 100644
|
|
--- a/net/mac80211/cfg.c
|
|
+++ b/net/mac80211/cfg.c
|
|
@@ -1099,13 +1099,13 @@ ieee80211_copy_rnr_beacon(u8 *pos, struct cfg80211_rnr_elems *dst,
|
|
{
|
|
int i, offset = 0;
|
|
|
|
+ dst->cnt = src->cnt;
|
|
for (i = 0; i < src->cnt; i++) {
|
|
memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
|
|
dst->elem[i].len = src->elem[i].len;
|
|
dst->elem[i].data = pos + offset;
|
|
offset += dst->elem[i].len;
|
|
}
|
|
- dst->cnt = src->cnt;
|
|
|
|
return offset;
|
|
}
|
|
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
|
|
index a4af3b7675efae..f3cdbd2133f67c 100644
|
|
--- a/net/mac80211/tdls.c
|
|
+++ b/net/mac80211/tdls.c
|
|
@@ -1450,7 +1450,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
|
|
if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
|
|
return -ENOTSUPP;
|
|
|
|
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
|
|
+ if (sdata->vif.type != NL80211_IFTYPE_STATION || !sdata->vif.cfg.assoc)
|
|
return -EINVAL;
|
|
|
|
switch (oper) {
|
|
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
|
|
index ec5469add68a27..7eddcb6f964555 100644
|
|
--- a/net/mac80211/tx.c
|
|
+++ b/net/mac80211/tx.c
|
|
@@ -629,6 +629,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
|
|
else
|
|
tx->key = NULL;
|
|
|
|
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
|
|
+ if (tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
|
|
+ info->control.hw_key = &tx->key->conf;
|
|
+ return TX_CONTINUE;
|
|
+ }
|
|
+
|
|
if (tx->key) {
|
|
bool skip_hw = false;
|
|
|
|
@@ -1451,7 +1457,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
|
|
{
|
|
struct fq *fq = &local->fq;
|
|
struct fq_tin *tin = &txqi->tin;
|
|
- u32 flow_idx = fq_flow_idx(fq, skb);
|
|
+ u32 flow_idx;
|
|
|
|
ieee80211_set_skb_enqueue_time(skb);
|
|
|
|
@@ -1467,6 +1473,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
|
|
IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
|
|
__skb_queue_tail(&txqi->frags, skb);
|
|
} else {
|
|
+ flow_idx = fq_flow_idx(fq, skb);
|
|
fq_tin_enqueue(fq, tin, flow_idx, skb,
|
|
fq_skb_free_func);
|
|
}
|
|
@@ -3877,6 +3884,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
|
|
* The key can be removed while the packet was queued, so need to call
|
|
* this here to get the current key.
|
|
*/
|
|
+ info->control.hw_key = NULL;
|
|
r = ieee80211_tx_h_select_key(&tx);
|
|
if (r != TX_CONTINUE) {
|
|
ieee80211_free_txskb(&local->hw, skb);
|
|
@@ -4099,7 +4107,9 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
|
|
|
|
spin_lock_bh(&local->active_txq_lock[txq->ac]);
|
|
|
|
- has_queue = force || txq_has_queue(txq);
|
|
+ has_queue = force ||
|
|
+ (!test_bit(IEEE80211_TXQ_STOP, &txqi->flags) &&
|
|
+ txq_has_queue(txq));
|
|
if (list_empty(&txqi->schedule_order) &&
|
|
(has_queue || ieee80211_txq_keep_active(txqi))) {
|
|
/* If airtime accounting is active, always enqueue STAs at the
|
|
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
|
|
index 2aad0562a41351..658e401b7937e4 100644
|
|
--- a/net/netfilter/nf_bpf_link.c
|
|
+++ b/net/netfilter/nf_bpf_link.c
|
|
@@ -17,7 +17,7 @@ static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb,
|
|
.skb = skb,
|
|
};
|
|
|
|
- return bpf_prog_run(prog, &ctx);
|
|
+ return bpf_prog_run_pin_on_cpu(prog, &ctx);
|
|
}
|
|
|
|
struct bpf_nf_link {
|
|
@@ -295,6 +295,9 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
|
|
if (off < 0 || off >= sizeof(struct bpf_nf_ctx))
|
|
return false;
|
|
|
|
+ if (off % size != 0)
|
|
+ return false;
|
|
+
|
|
if (type == BPF_WRITE)
|
|
return false;
|
|
|
|
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
|
index 18ae39cf418876..4ffb5ef79ca13f 100644
|
|
--- a/net/netfilter/nf_tables_api.c
|
|
+++ b/net/netfilter/nf_tables_api.c
|
|
@@ -979,11 +979,6 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
|
|
NFTA_TABLE_PAD))
|
|
goto nla_put_failure;
|
|
|
|
- if (event == NFT_MSG_DELTABLE) {
|
|
- nlmsg_end(skb, nlh);
|
|
- return 0;
|
|
- }
|
|
-
|
|
if (nla_put_be32(skb, NFTA_TABLE_FLAGS,
|
|
htonl(table->flags & NFT_TABLE_F_MASK)))
|
|
goto nla_put_failure;
|
|
@@ -1827,11 +1822,6 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
|
|
NFTA_CHAIN_PAD))
|
|
goto nla_put_failure;
|
|
|
|
- if (event == NFT_MSG_DELCHAIN && !hook_list) {
|
|
- nlmsg_end(skb, nlh);
|
|
- return 0;
|
|
- }
|
|
-
|
|
if (nft_is_base_chain(chain)) {
|
|
const struct nft_base_chain *basechain = nft_base_chain(chain);
|
|
struct nft_stats __percpu *stats;
|
|
@@ -3785,7 +3775,7 @@ void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
|
|
/* can only be used if rule is no longer visible to dumps */
|
|
static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
|
|
{
|
|
- lockdep_commit_lock_is_held(ctx->net);
|
|
+ WARN_ON_ONCE(!lockdep_commit_lock_is_held(ctx->net));
|
|
|
|
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
|
|
nf_tables_rule_destroy(ctx, rule);
|
|
@@ -4584,11 +4574,6 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
|
|
NFTA_SET_PAD))
|
|
goto nla_put_failure;
|
|
|
|
- if (event == NFT_MSG_DELSET) {
|
|
- nlmsg_end(skb, nlh);
|
|
- return 0;
|
|
- }
|
|
-
|
|
if (set->flags != 0)
|
|
if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
|
|
goto nla_put_failure;
|
|
@@ -5586,7 +5571,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
|
|
struct nft_set_binding *binding,
|
|
enum nft_trans_phase phase)
|
|
{
|
|
- lockdep_commit_lock_is_held(ctx->net);
|
|
+ WARN_ON_ONCE(!lockdep_commit_lock_is_held(ctx->net));
|
|
|
|
switch (phase) {
|
|
case NFT_TRANS_PREPARE_ERROR:
|
|
@@ -7828,11 +7813,6 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
|
|
NFTA_OBJ_PAD))
|
|
goto nla_put_failure;
|
|
|
|
- if (event == NFT_MSG_DELOBJ) {
|
|
- nlmsg_end(skb, nlh);
|
|
- return 0;
|
|
- }
|
|
-
|
|
if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
|
|
nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
|
|
nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
|
|
@@ -8851,11 +8831,6 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
|
|
NFTA_FLOWTABLE_PAD))
|
|
goto nla_put_failure;
|
|
|
|
- if (event == NFT_MSG_DELFLOWTABLE && !hook_list) {
|
|
- nlmsg_end(skb, nlh);
|
|
- return 0;
|
|
- }
|
|
-
|
|
if (nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
|
|
nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
|
|
goto nla_put_failure;
|
|
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
|
|
index 7c6bf1c168131a..0ca1cdfc4095b6 100644
|
|
--- a/net/netfilter/xt_nfacct.c
|
|
+++ b/net/netfilter/xt_nfacct.c
|
|
@@ -38,8 +38,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
|
|
|
|
nfacct = nfnl_acct_find_get(par->net, info->name);
|
|
if (nfacct == NULL) {
|
|
- pr_info_ratelimited("accounting object `%s' does not exists\n",
|
|
- info->name);
|
|
+ pr_info_ratelimited("accounting object `%.*s' does not exist\n",
|
|
+ NFACCT_NAME_MAX, info->name);
|
|
return -ENOENT;
|
|
}
|
|
info->nfacct = nfacct;
|
|
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
|
|
index 9cac7cb78c0f54..9620f160be70bb 100644
|
|
--- a/net/packet/af_packet.c
|
|
+++ b/net/packet/af_packet.c
|
|
@@ -4566,10 +4566,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
|
|
spin_lock(&po->bind_lock);
|
|
was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
|
|
num = po->num;
|
|
- if (was_running) {
|
|
- WRITE_ONCE(po->num, 0);
|
|
+ WRITE_ONCE(po->num, 0);
|
|
+ if (was_running)
|
|
__unregister_prot_hook(sk, false);
|
|
- }
|
|
+
|
|
spin_unlock(&po->bind_lock);
|
|
|
|
synchronize_net();
|
|
@@ -4601,10 +4601,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
|
|
mutex_unlock(&po->pg_vec_lock);
|
|
|
|
spin_lock(&po->bind_lock);
|
|
- if (was_running) {
|
|
- WRITE_ONCE(po->num, num);
|
|
+ WRITE_ONCE(po->num, num);
|
|
+ if (was_running)
|
|
register_prot_hook(sk);
|
|
- }
|
|
+
|
|
spin_unlock(&po->bind_lock);
|
|
if (pg_vec && (po->tp_version > TPACKET_V2)) {
|
|
/* Because we don't support block-based V3 on tx-ring */
|
|
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
|
|
index 4d15b6a6169c45..8792244620b9d2 100644
|
|
--- a/net/sched/act_ctinfo.c
|
|
+++ b/net/sched/act_ctinfo.c
|
|
@@ -44,9 +44,9 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
|
|
ipv4_change_dsfield(ip_hdr(skb),
|
|
INET_ECN_MASK,
|
|
newdscp);
|
|
- ca->stats_dscp_set++;
|
|
+ atomic64_inc(&ca->stats_dscp_set);
|
|
} else {
|
|
- ca->stats_dscp_error++;
|
|
+ atomic64_inc(&ca->stats_dscp_error);
|
|
}
|
|
}
|
|
break;
|
|
@@ -57,9 +57,9 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
|
|
ipv6_change_dsfield(ipv6_hdr(skb),
|
|
INET_ECN_MASK,
|
|
newdscp);
|
|
- ca->stats_dscp_set++;
|
|
+ atomic64_inc(&ca->stats_dscp_set);
|
|
} else {
|
|
- ca->stats_dscp_error++;
|
|
+ atomic64_inc(&ca->stats_dscp_error);
|
|
}
|
|
}
|
|
break;
|
|
@@ -72,7 +72,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
|
|
struct tcf_ctinfo_params *cp,
|
|
struct sk_buff *skb)
|
|
{
|
|
- ca->stats_cpmark_set++;
|
|
+ atomic64_inc(&ca->stats_cpmark_set);
|
|
skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
|
|
}
|
|
|
|
@@ -323,15 +323,18 @@ static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
|
|
}
|
|
|
|
if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
|
|
- ci->stats_dscp_set, TCA_CTINFO_PAD))
|
|
+ atomic64_read(&ci->stats_dscp_set),
|
|
+ TCA_CTINFO_PAD))
|
|
goto nla_put_failure;
|
|
|
|
if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
|
|
- ci->stats_dscp_error, TCA_CTINFO_PAD))
|
|
+ atomic64_read(&ci->stats_dscp_error),
|
|
+ TCA_CTINFO_PAD))
|
|
goto nla_put_failure;
|
|
|
|
if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
|
|
- ci->stats_cpmark_set, TCA_CTINFO_PAD))
|
|
+ atomic64_read(&ci->stats_cpmark_set),
|
|
+ TCA_CTINFO_PAD))
|
|
goto nla_put_failure;
|
|
|
|
spin_unlock_bh(&ci->tcf_lock);
|
|
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
|
|
index 793009f445c03b..a0e3f3bae5361d 100644
|
|
--- a/net/sched/sch_mqprio.c
|
|
+++ b/net/sched/sch_mqprio.c
|
|
@@ -152,7 +152,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
|
|
static const struct
|
|
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
|
|
[TCA_MQPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
|
|
- TC_QOPT_MAX_QUEUE),
|
|
+ TC_QOPT_MAX_QUEUE - 1),
|
|
[TCA_MQPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
|
|
TC_FP_EXPRESS,
|
|
TC_FP_PREEMPTIBLE),
|
|
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
|
|
index 447d3e836a24f0..0ad231e94e14d3 100644
|
|
--- a/net/sched/sch_netem.c
|
|
+++ b/net/sched/sch_netem.c
|
|
@@ -972,6 +972,41 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
|
|
return 0;
|
|
}
|
|
|
|
+static const struct Qdisc_class_ops netem_class_ops;
|
|
+
|
|
+static int check_netem_in_tree(struct Qdisc *sch, bool duplicates,
|
|
+ struct netlink_ext_ack *extack)
|
|
+{
|
|
+ struct Qdisc *root, *q;
|
|
+ unsigned int i;
|
|
+
|
|
+ root = qdisc_root_sleeping(sch);
|
|
+
|
|
+ if (sch != root && root->ops->cl_ops == &netem_class_ops) {
|
|
+ if (duplicates ||
|
|
+ ((struct netem_sched_data *)qdisc_priv(root))->duplicate)
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (!qdisc_dev(root))
|
|
+ return 0;
|
|
+
|
|
+ hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
|
|
+ if (sch != q && q->ops->cl_ops == &netem_class_ops) {
|
|
+ if (duplicates ||
|
|
+ ((struct netem_sched_data *)qdisc_priv(q))->duplicate)
|
|
+ goto err;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err:
|
|
+ NL_SET_ERR_MSG(extack,
|
|
+ "netem: cannot mix duplicating netems with other netems in tree");
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
/* Parse netlink message to set options */
|
|
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
|
|
struct netlink_ext_ack *extack)
|
|
@@ -1030,6 +1065,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
|
|
q->gap = qopt->gap;
|
|
q->counter = 0;
|
|
q->loss = qopt->loss;
|
|
+
|
|
+ ret = check_netem_in_tree(sch, qopt->duplicate, extack);
|
|
+ if (ret)
|
|
+ goto unlock;
|
|
+
|
|
q->duplicate = qopt->duplicate;
|
|
|
|
/* for compatibility with earlier versions.
|
|
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
|
|
index d162e2dd860255..a01d17d03bf571 100644
|
|
--- a/net/sched/sch_taprio.c
|
|
+++ b/net/sched/sch_taprio.c
|
|
@@ -41,6 +41,11 @@ static struct static_key_false taprio_have_working_mqprio;
|
|
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
|
|
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
|
|
#define TAPRIO_FLAGS_INVALID U32_MAX
|
|
+/* Minimum value for picos_per_byte to ensure non-zero duration
|
|
+ * for minimum-sized Ethernet frames (ETH_ZLEN = 60).
|
|
+ * 60 * 17 > PSEC_PER_NSEC (1000)
|
|
+ */
|
|
+#define TAPRIO_PICOS_PER_BYTE_MIN 17
|
|
|
|
struct sched_entry {
|
|
/* Durations between this GCL entry and the GCL entry where the
|
|
@@ -1294,7 +1299,8 @@ static void taprio_start_sched(struct Qdisc *sch,
|
|
}
|
|
|
|
static void taprio_set_picos_per_byte(struct net_device *dev,
|
|
- struct taprio_sched *q)
|
|
+ struct taprio_sched *q,
|
|
+ struct netlink_ext_ack *extack)
|
|
{
|
|
struct ethtool_link_ksettings ecmd;
|
|
int speed = SPEED_10;
|
|
@@ -1310,6 +1316,15 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
|
|
|
|
skip:
|
|
picos_per_byte = (USEC_PER_SEC * 8) / speed;
|
|
+ if (picos_per_byte < TAPRIO_PICOS_PER_BYTE_MIN) {
|
|
+ if (!extack)
|
|
+ pr_warn("Link speed %d is too high. Schedule may be inaccurate.\n",
|
|
+ speed);
|
|
+ NL_SET_ERR_MSG_FMT_MOD(extack,
|
|
+ "Link speed %d is too high. Schedule may be inaccurate.",
|
|
+ speed);
|
|
+ picos_per_byte = TAPRIO_PICOS_PER_BYTE_MIN;
|
|
+ }
|
|
|
|
atomic64_set(&q->picos_per_byte, picos_per_byte);
|
|
netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
|
|
@@ -1334,7 +1349,7 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
|
|
if (dev != qdisc_dev(q->root))
|
|
continue;
|
|
|
|
- taprio_set_picos_per_byte(dev, q);
|
|
+ taprio_set_picos_per_byte(dev, q, NULL);
|
|
|
|
stab = rtnl_dereference(q->root->stab);
|
|
|
|
@@ -1871,7 +1886,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
|
|
q->flags = err;
|
|
|
|
/* Needed for length_to_duration() during netlink attribute parsing */
|
|
- taprio_set_picos_per_byte(dev, q);
|
|
+ taprio_set_picos_per_byte(dev, q, extack);
|
|
|
|
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
|
|
if (err < 0)
|
|
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
|
|
index 8d760f8fc4b5a6..7229b4a9ad1dcf 100644
|
|
--- a/net/sunrpc/svcsock.c
|
|
+++ b/net/sunrpc/svcsock.c
|
|
@@ -257,20 +257,47 @@ svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
|
|
}
|
|
|
|
static int
|
|
-svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
|
|
+svc_tcp_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags)
|
|
{
|
|
union {
|
|
struct cmsghdr cmsg;
|
|
u8 buf[CMSG_SPACE(sizeof(u8))];
|
|
} u;
|
|
- struct socket *sock = svsk->sk_sock;
|
|
+ u8 alert[2];
|
|
+ struct kvec alert_kvec = {
|
|
+ .iov_base = alert,
|
|
+ .iov_len = sizeof(alert),
|
|
+ };
|
|
+ struct msghdr msg = {
|
|
+ .msg_flags = *msg_flags,
|
|
+ .msg_control = &u,
|
|
+ .msg_controllen = sizeof(u),
|
|
+ };
|
|
+ int ret;
|
|
+
|
|
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
|
|
+ alert_kvec.iov_len);
|
|
+ ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
|
|
+ if (ret > 0 &&
|
|
+ tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
|
|
+ iov_iter_revert(&msg.msg_iter, ret);
|
|
+ ret = svc_tcp_sock_process_cmsg(sock, &msg, &u.cmsg, -EAGAIN);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int
|
|
+svc_tcp_sock_recvmsg(struct svc_sock *svsk, struct msghdr *msg)
|
|
+{
|
|
int ret;
|
|
+ struct socket *sock = svsk->sk_sock;
|
|
|
|
- msg->msg_control = &u;
|
|
- msg->msg_controllen = sizeof(u);
|
|
ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
|
|
- if (unlikely(msg->msg_controllen != sizeof(u)))
|
|
- ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
|
|
+ if (msg->msg_flags & MSG_CTRUNC) {
|
|
+ msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
|
|
+ if (ret == 0 || ret == -EIO)
|
|
+ ret = svc_tcp_sock_recv_cmsg(sock, &msg->msg_flags);
|
|
+ }
|
|
return ret;
|
|
}
|
|
|
|
@@ -321,7 +348,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
|
|
iov_iter_advance(&msg.msg_iter, seek);
|
|
buflen -= seek;
|
|
}
|
|
- len = svc_tcp_sock_recv_cmsg(svsk, &msg);
|
|
+ len = svc_tcp_sock_recvmsg(svsk, &msg);
|
|
if (len > 0)
|
|
svc_flush_bvec(bvec, len, seek);
|
|
|
|
@@ -1019,7 +1046,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
|
|
iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
|
|
iov.iov_len = want;
|
|
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
|
|
- len = svc_tcp_sock_recv_cmsg(svsk, &msg);
|
|
+ len = svc_tcp_sock_recvmsg(svsk, &msg);
|
|
if (len < 0)
|
|
return len;
|
|
svsk->sk_tcplen += len;
|
|
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
|
|
index f90d84492bbedc..99bb3e762af46f 100644
|
|
--- a/net/sunrpc/xprtsock.c
|
|
+++ b/net/sunrpc/xprtsock.c
|
|
@@ -358,7 +358,7 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
|
|
|
|
static int
|
|
xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
|
|
- struct cmsghdr *cmsg, int ret)
|
|
+ unsigned int *msg_flags, struct cmsghdr *cmsg, int ret)
|
|
{
|
|
u8 content_type = tls_get_record_type(sock->sk, cmsg);
|
|
u8 level, description;
|
|
@@ -371,7 +371,7 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
|
|
* record, even though there might be more frames
|
|
* waiting to be decrypted.
|
|
*/
|
|
- msg->msg_flags &= ~MSG_EOR;
|
|
+ *msg_flags &= ~MSG_EOR;
|
|
break;
|
|
case TLS_RECORD_TYPE_ALERT:
|
|
tls_alert_recv(sock->sk, msg, &level, &description);
|
|
@@ -386,19 +386,33 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
|
|
}
|
|
|
|
static int
|
|
-xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
|
|
+xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
|
|
{
|
|
union {
|
|
struct cmsghdr cmsg;
|
|
u8 buf[CMSG_SPACE(sizeof(u8))];
|
|
} u;
|
|
+ u8 alert[2];
|
|
+ struct kvec alert_kvec = {
|
|
+ .iov_base = alert,
|
|
+ .iov_len = sizeof(alert),
|
|
+ };
|
|
+ struct msghdr msg = {
|
|
+ .msg_flags = *msg_flags,
|
|
+ .msg_control = &u,
|
|
+ .msg_controllen = sizeof(u),
|
|
+ };
|
|
int ret;
|
|
|
|
- msg->msg_control = &u;
|
|
- msg->msg_controllen = sizeof(u);
|
|
- ret = sock_recvmsg(sock, msg, flags);
|
|
- if (msg->msg_controllen != sizeof(u))
|
|
- ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
|
|
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
|
|
+ alert_kvec.iov_len);
|
|
+ ret = sock_recvmsg(sock, &msg, flags);
|
|
+ if (ret > 0 &&
|
|
+ tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
|
|
+ iov_iter_revert(&msg.msg_iter, ret);
|
|
+ ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
|
|
+ -EAGAIN);
|
|
+ }
|
|
return ret;
|
|
}
|
|
|
|
@@ -408,7 +422,13 @@ xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
|
|
ssize_t ret;
|
|
if (seek != 0)
|
|
iov_iter_advance(&msg->msg_iter, seek);
|
|
- ret = xs_sock_recv_cmsg(sock, msg, flags);
|
|
+ ret = sock_recvmsg(sock, msg, flags);
|
|
+ /* Handle TLS inband control message lazily */
|
|
+ if (msg->msg_flags & MSG_CTRUNC) {
|
|
+ msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
|
|
+ if (ret == 0 || ret == -EIO)
|
|
+ ret = xs_sock_recv_cmsg(sock, &msg->msg_flags, flags);
|
|
+ }
|
|
return ret > 0 ? ret + seek : ret;
|
|
}
|
|
|
|
@@ -434,7 +454,7 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
|
|
size_t count)
|
|
{
|
|
iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
|
|
- return xs_sock_recv_cmsg(sock, msg, flags);
|
|
+ return xs_sock_recvmsg(sock, msg, flags, 0);
|
|
}
|
|
|
|
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
|
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
|
|
index 4a9a3aed5d6d41..4905a81c4ac194 100644
|
|
--- a/net/tls/tls_sw.c
|
|
+++ b/net/tls/tls_sw.c
|
|
@@ -872,6 +872,19 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
|
|
delta = msg->sg.size;
|
|
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
|
|
delta -= msg->sg.size;
|
|
+
|
|
+ if ((s32)delta > 0) {
|
|
+ /* It indicates that we executed bpf_msg_pop_data(),
|
|
+ * causing the plaintext data size to decrease.
|
|
+ * Therefore the encrypted data size also needs to
|
|
+ * correspondingly decrease. We only need to subtract
|
|
+ * delta to calculate the new ciphertext length since
|
|
+ * ktls does not support block encryption.
|
|
+ */
|
|
+ struct sk_msg *enc = &ctx->open_rec->msg_encrypted;
|
|
+
|
|
+ sk_msg_trim(sk, enc, enc->sg.size - delta);
|
|
+ }
|
|
}
|
|
if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
|
|
!enospc && !full_record) {
|
|
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
|
|
index f20b117e5255ef..f95ac11a7e0de4 100644
|
|
--- a/net/vmw_vsock/af_vsock.c
|
|
+++ b/net/vmw_vsock/af_vsock.c
|
|
@@ -688,7 +688,8 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk,
|
|
unsigned int i;
|
|
|
|
for (i = 0; i < MAX_PORT_RETRIES; i++) {
|
|
- if (port <= LAST_RESERVED_PORT)
|
|
+ if (port == VMADDR_PORT_ANY ||
|
|
+ port <= LAST_RESERVED_PORT)
|
|
port = LAST_RESERVED_PORT + 1;
|
|
|
|
new_addr.svm_port = port++;
|
|
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
|
|
index 867debd3b9124c..1d7254bcb44cb7 100644
|
|
--- a/samples/mei/mei-amt-version.c
|
|
+++ b/samples/mei/mei-amt-version.c
|
|
@@ -69,11 +69,11 @@
|
|
#include <string.h>
|
|
#include <fcntl.h>
|
|
#include <sys/ioctl.h>
|
|
+#include <sys/time.h>
|
|
#include <unistd.h>
|
|
#include <errno.h>
|
|
#include <stdint.h>
|
|
#include <stdbool.h>
|
|
-#include <bits/wordsize.h>
|
|
#include <linux/mei.h>
|
|
|
|
/*****************************************************************************
|
|
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
|
|
index 4f3ba3debc08e1..119997c8bf1f74 100644
|
|
--- a/scripts/kconfig/qconf.cc
|
|
+++ b/scripts/kconfig/qconf.cc
|
|
@@ -480,7 +480,7 @@ void ConfigList::updateListAllForAll()
|
|
while (it.hasNext()) {
|
|
ConfigList *list = it.next();
|
|
|
|
- list->updateList();
|
|
+ list->updateListAll();
|
|
}
|
|
}
|
|
|
|
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
|
|
index 58fbf67139b9c7..e59305abb85a33 100644
|
|
--- a/security/apparmor/include/match.h
|
|
+++ b/security/apparmor/include/match.h
|
|
@@ -141,17 +141,15 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start,
|
|
|
|
void aa_dfa_free_kref(struct kref *kref);
|
|
|
|
-#define WB_HISTORY_SIZE 24
|
|
+/* This needs to be a power of 2 */
|
|
+#define WB_HISTORY_SIZE 32
|
|
struct match_workbuf {
|
|
- unsigned int count;
|
|
unsigned int pos;
|
|
unsigned int len;
|
|
- unsigned int size; /* power of 2, same as history size */
|
|
- unsigned int history[WB_HISTORY_SIZE];
|
|
+ aa_state_t history[WB_HISTORY_SIZE];
|
|
};
|
|
#define DEFINE_MATCH_WB(N) \
|
|
struct match_workbuf N = { \
|
|
- .count = 0, \
|
|
.pos = 0, \
|
|
.len = 0, \
|
|
}
|
|
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
|
|
index b97ef5e1db7320..3667b79e9366b0 100644
|
|
--- a/security/apparmor/match.c
|
|
+++ b/security/apparmor/match.c
|
|
@@ -668,34 +668,35 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start,
|
|
return state;
|
|
}
|
|
|
|
-#define inc_wb_pos(wb) \
|
|
-do { \
|
|
+#define inc_wb_pos(wb) \
|
|
+do { \
|
|
+ BUILD_BUG_ON_NOT_POWER_OF_2(WB_HISTORY_SIZE); \
|
|
wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \
|
|
- wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \
|
|
+ wb->len = (wb->len + 1) > WB_HISTORY_SIZE ? WB_HISTORY_SIZE : \
|
|
+ wb->len + 1; \
|
|
} while (0)
|
|
|
|
/* For DFAs that don't support extended tagging of states */
|
|
+/* adjust is only set if is_loop returns true */
|
|
static bool is_loop(struct match_workbuf *wb, aa_state_t state,
|
|
unsigned int *adjust)
|
|
{
|
|
- aa_state_t pos = wb->pos;
|
|
- aa_state_t i;
|
|
+ int pos = wb->pos;
|
|
+ int i;
|
|
|
|
if (wb->history[pos] < state)
|
|
return false;
|
|
|
|
- for (i = 0; i <= wb->len; i++) {
|
|
+ for (i = 0; i < wb->len; i++) {
|
|
if (wb->history[pos] == state) {
|
|
*adjust = i;
|
|
return true;
|
|
}
|
|
- if (pos == 0)
|
|
- pos = WB_HISTORY_SIZE;
|
|
- pos--;
|
|
+ /* -1 wraps to WB_HISTORY_SIZE - 1 */
|
|
+ pos = (pos - 1) & (WB_HISTORY_SIZE - 1);
|
|
}
|
|
|
|
- *adjust = i;
|
|
- return true;
|
|
+ return false;
|
|
}
|
|
|
|
static aa_state_t leftmatch_fb(struct aa_dfa *dfa, aa_state_t start,
|
|
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
|
|
index 27e48fdbbf3aa0..94b452595f3032 100644
|
|
--- a/sound/pci/hda/patch_ca0132.c
|
|
+++ b/sound/pci/hda/patch_ca0132.c
|
|
@@ -4803,7 +4803,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
|
|
if (err < 0)
|
|
goto exit;
|
|
|
|
- if (ca0132_alt_select_out_quirk_set(codec) < 0)
|
|
+ err = ca0132_alt_select_out_quirk_set(codec);
|
|
+ if (err < 0)
|
|
goto exit;
|
|
|
|
switch (spec->cur_out_type) {
|
|
@@ -4893,6 +4894,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
|
|
spec->bass_redirection_val);
|
|
else
|
|
err = ca0132_alt_surround_set_bass_redirection(codec, 0);
|
|
+ if (err < 0)
|
|
+ goto exit;
|
|
|
|
/* Unmute DSP now that we're done with output selection. */
|
|
err = dspio_set_uint_param(codec, 0x96,
|
|
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
|
|
index 66ef8f4fd02cd4..24919e68b34689 100644
|
|
--- a/sound/soc/amd/yc/acp6x-mach.c
|
|
+++ b/sound/soc/amd/yc/acp6x-mach.c
|
|
@@ -409,6 +409,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
|
|
}
|
|
},
|
|
+ {
|
|
+ .driver_data = &acp6x_card,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6501RM"),
|
|
+ }
|
|
+ },
|
|
{
|
|
.driver_data = &acp6x_card,
|
|
.matches = {
|
|
@@ -528,6 +535,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
|
|
}
|
|
},
|
|
+ {
|
|
+ .driver_data = &acp6x_card,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
|
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb1xxx"),
|
|
+ }
|
|
+ },
|
|
{
|
|
.driver_data = &acp6x_card,
|
|
.matches = {
|
|
@@ -577,6 +591,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
|
|
DMI_MATCH(DMI_BOARD_NAME, "8A7F"),
|
|
}
|
|
},
|
|
+ {
|
|
+ .driver_data = &acp6x_card,
|
|
+ .matches = {
|
|
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
|
|
+ DMI_MATCH(DMI_BOARD_NAME, "8A81"),
|
|
+ }
|
|
+ },
|
|
{
|
|
.driver_data = &acp6x_card,
|
|
.matches = {
|
|
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
|
|
index c46f64557a7ffd..1d7791c7fb4ec0 100644
|
|
--- a/sound/soc/fsl/fsl_xcvr.c
|
|
+++ b/sound/soc/fsl/fsl_xcvr.c
|
|
@@ -1197,6 +1197,26 @@ static irqreturn_t irq0_isr(int irq, void *devid)
|
|
/* clear CS control register */
|
|
memset_io(reg_ctrl, 0, sizeof(val));
|
|
}
|
|
+ } else {
|
|
+ regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_0,
|
|
+ (u32 *)&xcvr->rx_iec958.status[0]);
|
|
+ regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_1,
|
|
+ (u32 *)&xcvr->rx_iec958.status[4]);
|
|
+ regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_2,
|
|
+ (u32 *)&xcvr->rx_iec958.status[8]);
|
|
+ regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_3,
|
|
+ (u32 *)&xcvr->rx_iec958.status[12]);
|
|
+ regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_4,
|
|
+ (u32 *)&xcvr->rx_iec958.status[16]);
|
|
+ regmap_read(xcvr->regmap, FSL_XCVR_RX_CS_DATA_5,
|
|
+ (u32 *)&xcvr->rx_iec958.status[20]);
|
|
+ for (i = 0; i < 6; i++) {
|
|
+ val = *(u32 *)(xcvr->rx_iec958.status + i * 4);
|
|
+ *(u32 *)(xcvr->rx_iec958.status + i * 4) =
|
|
+ bitrev32(val);
|
|
+ }
|
|
+ regmap_set_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
|
|
+ FSL_XCVR_RX_DPTH_CTRL_CSA);
|
|
}
|
|
}
|
|
if (isr & FSL_XCVR_IRQ_NEW_UD) {
|
|
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
|
|
index 0ae6eecc885191..033be4d3e02db9 100644
|
|
--- a/sound/soc/intel/boards/Kconfig
|
|
+++ b/sound/soc/intel/boards/Kconfig
|
|
@@ -11,7 +11,7 @@ menuconfig SND_SOC_INTEL_MACH
|
|
kernel: saying N will just cause the configurator to skip all
|
|
the questions about Intel ASoC machine drivers.
|
|
|
|
-if SND_SOC_INTEL_MACH
|
|
+if SND_SOC_INTEL_MACH && (SND_SOC_SOF_INTEL_COMMON || !SND_SOC_SOF_INTEL_COMMON)
|
|
|
|
config SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES
|
|
bool "Use more user friendly long card names"
|
|
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
|
|
index 52495c930ca3bf..56a704ec2ea947 100644
|
|
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
|
|
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
|
|
@@ -120,7 +120,9 @@ int mtk_afe_pcm_new(struct snd_soc_component *component,
|
|
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
|
|
|
|
size = afe->mtk_afe_hardware->buffer_bytes_max;
|
|
- snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, afe->dev, 0, size);
|
|
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, afe->dev,
|
|
+ afe->preallocate_buffers ? size : 0,
|
|
+ size);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/sound/soc/mediatek/common/mtk-base-afe.h b/sound/soc/mediatek/common/mtk-base-afe.h
|
|
index f51578b6c50a35..a406f2e3e7a878 100644
|
|
--- a/sound/soc/mediatek/common/mtk-base-afe.h
|
|
+++ b/sound/soc/mediatek/common/mtk-base-afe.h
|
|
@@ -117,6 +117,7 @@ struct mtk_base_afe {
|
|
struct mtk_base_afe_irq *irqs;
|
|
int irqs_size;
|
|
int memif_32bit_supported;
|
|
+ bool preallocate_buffers;
|
|
|
|
struct list_head sub_dais;
|
|
struct snd_soc_dai_driver *dai_drivers;
|
|
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
|
|
index 06269f7e37566b..240b2f041a3f8b 100644
|
|
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
|
|
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
|
|
@@ -13,6 +13,7 @@
|
|
#include <linux/module.h>
|
|
#include <linux/of.h>
|
|
#include <linux/of_address.h>
|
|
+#include <linux/of_reserved_mem.h>
|
|
#include <linux/dma-mapping.h>
|
|
#include <linux/pm_runtime.h>
|
|
#include <sound/soc.h>
|
|
@@ -1070,6 +1071,12 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
|
|
|
|
afe->dev = &pdev->dev;
|
|
|
|
+ ret = of_reserved_mem_device_init(&pdev->dev);
|
|
+ if (ret) {
|
|
+ dev_info(&pdev->dev, "no reserved memory found, pre-allocating buffers instead\n");
|
|
+ afe->preallocate_buffers = true;
|
|
+ }
|
|
+
|
|
irq_id = platform_get_irq(pdev, 0);
|
|
if (irq_id <= 0)
|
|
return irq_id < 0 ? irq_id : -ENXIO;
|
|
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
|
|
index 90422ed2bbcc27..cbee5a8764c37f 100644
|
|
--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
|
|
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
|
|
@@ -10,6 +10,7 @@
|
|
#include <linux/mfd/syscon.h>
|
|
#include <linux/of.h>
|
|
#include <linux/of_address.h>
|
|
+#include <linux/of_reserved_mem.h>
|
|
#include <linux/pm_runtime.h>
|
|
#include <linux/reset.h>
|
|
|
|
@@ -1106,6 +1107,12 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
|
|
afe->dev = &pdev->dev;
|
|
dev = afe->dev;
|
|
|
|
+ ret = of_reserved_mem_device_init(dev);
|
|
+ if (ret) {
|
|
+ dev_info(dev, "no reserved memory found, pre-allocating buffers instead\n");
|
|
+ afe->preallocate_buffers = true;
|
|
+ }
|
|
+
|
|
/* initial audio related clock */
|
|
ret = mt8183_init_clock(afe);
|
|
if (ret) {
|
|
diff --git a/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
|
|
index b86159f70a33a2..9ee431304a431b 100644
|
|
--- a/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
|
|
+++ b/sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
|
|
@@ -10,6 +10,7 @@
|
|
#include <linux/module.h>
|
|
#include <linux/of.h>
|
|
#include <linux/of_address.h>
|
|
+#include <linux/of_reserved_mem.h>
|
|
#include <linux/pm_runtime.h>
|
|
#include <linux/reset.h>
|
|
#include <sound/soc.h>
|
|
@@ -2835,6 +2836,12 @@ static int mt8186_afe_pcm_dev_probe(struct platform_device *pdev)
|
|
afe_priv = afe->platform_priv;
|
|
afe->dev = &pdev->dev;
|
|
|
|
+ ret = of_reserved_mem_device_init(dev);
|
|
+ if (ret) {
|
|
+ dev_info(dev, "no reserved memory found, pre-allocating buffers instead\n");
|
|
+ afe->preallocate_buffers = true;
|
|
+ }
|
|
+
|
|
afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
|
|
if (IS_ERR(afe->base_addr))
|
|
return PTR_ERR(afe->base_addr);
|
|
diff --git a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
|
|
index d0520e7e1d79ae..364e43da0e2416 100644
|
|
--- a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
|
|
+++ b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
|
|
@@ -12,6 +12,7 @@
|
|
#include <linux/mfd/syscon.h>
|
|
#include <linux/of.h>
|
|
#include <linux/of_address.h>
|
|
+#include <linux/of_reserved_mem.h>
|
|
#include <linux/pm_runtime.h>
|
|
#include <linux/reset.h>
|
|
#include <sound/soc.h>
|
|
@@ -2196,6 +2197,12 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
|
|
afe->dev = &pdev->dev;
|
|
dev = afe->dev;
|
|
|
|
+ ret = of_reserved_mem_device_init(dev);
|
|
+ if (ret) {
|
|
+ dev_info(dev, "no reserved memory found, pre-allocating buffers instead\n");
|
|
+ afe->preallocate_buffers = true;
|
|
+ }
|
|
+
|
|
/* init audio related clock */
|
|
ret = mt8192_init_clock(afe);
|
|
if (ret) {
|
|
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
|
|
index 507743c87e402d..5a0fa8d1f38b5c 100644
|
|
--- a/sound/soc/soc-dai.c
|
|
+++ b/sound/soc/soc-dai.c
|
|
@@ -273,13 +273,15 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
|
|
&rx_mask,
|
|
};
|
|
|
|
- if (dai->driver->ops &&
|
|
- dai->driver->ops->xlate_tdm_slot_mask)
|
|
- ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
|
|
- else
|
|
- ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
|
|
- if (ret)
|
|
- goto err;
|
|
+ if (slots) {
|
|
+ if (dai->driver->ops &&
|
|
+ dai->driver->ops->xlate_tdm_slot_mask)
|
|
+ ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
|
|
+ else
|
|
+ ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask);
|
|
+ if (ret)
|
|
+ goto err;
|
|
+ }
|
|
|
|
for_each_pcm_streams(stream)
|
|
snd_soc_dai_tdm_mask_set(dai, stream, *tdm_mask[stream]);
|
|
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
|
|
index eff1355cc3df00..5be32c37bb8a09 100644
|
|
--- a/sound/soc/soc-ops.c
|
|
+++ b/sound/soc/soc-ops.c
|
|
@@ -641,28 +641,32 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
|
|
static int snd_soc_clip_to_platform_max(struct snd_kcontrol *kctl)
|
|
{
|
|
struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
|
|
- struct snd_ctl_elem_value uctl;
|
|
+ struct snd_ctl_elem_value *uctl;
|
|
int ret;
|
|
|
|
if (!mc->platform_max)
|
|
return 0;
|
|
|
|
- ret = kctl->get(kctl, &uctl);
|
|
+ uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
|
|
+ if (!uctl)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ret = kctl->get(kctl, uctl);
|
|
if (ret < 0)
|
|
- return ret;
|
|
+ goto out;
|
|
|
|
- if (uctl.value.integer.value[0] > mc->platform_max)
|
|
- uctl.value.integer.value[0] = mc->platform_max;
|
|
+ if (uctl->value.integer.value[0] > mc->platform_max)
|
|
+ uctl->value.integer.value[0] = mc->platform_max;
|
|
|
|
if (snd_soc_volsw_is_stereo(mc) &&
|
|
- uctl.value.integer.value[1] > mc->platform_max)
|
|
- uctl.value.integer.value[1] = mc->platform_max;
|
|
+ uctl->value.integer.value[1] > mc->platform_max)
|
|
+ uctl->value.integer.value[1] = mc->platform_max;
|
|
|
|
- ret = kctl->put(kctl, &uctl);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
+ ret = kctl->put(kctl, uctl);
|
|
|
|
- return 0;
|
|
+out:
|
|
+ kfree(uctl);
|
|
+ return ret;
|
|
}
|
|
|
|
/**
|
|
diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
|
|
index 90480b9b9b0891..316f1abefd51db 100644
|
|
--- a/sound/usb/mixer_scarlett2.c
|
|
+++ b/sound/usb/mixer_scarlett2.c
|
|
@@ -1279,6 +1279,8 @@ static int scarlett2_usb(
|
|
struct scarlett2_usb_packet *req, *resp = NULL;
|
|
size_t req_buf_size = struct_size(req, data, req_size);
|
|
size_t resp_buf_size = struct_size(resp, data, resp_size);
|
|
+ int retries = 0;
|
|
+ const int max_retries = 5;
|
|
int err;
|
|
|
|
req = kmalloc(req_buf_size, GFP_KERNEL);
|
|
@@ -1302,10 +1304,15 @@ static int scarlett2_usb(
|
|
if (req_size)
|
|
memcpy(req->data, req_data, req_size);
|
|
|
|
+retry:
|
|
err = scarlett2_usb_tx(dev, private->bInterfaceNumber,
|
|
req, req_buf_size);
|
|
|
|
if (err != req_buf_size) {
|
|
+ if (err == -EPROTO && ++retries <= max_retries) {
|
|
+ msleep(5 * (1 << (retries - 1)));
|
|
+ goto retry;
|
|
+ }
|
|
usb_audio_err(
|
|
mixer->chip,
|
|
"%s USB request result cmd %x was %d\n",
|
|
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
|
|
index ab95fb34a63584..7b9292cf839f27 100644
|
|
--- a/sound/x86/intel_hdmi_audio.c
|
|
+++ b/sound/x86/intel_hdmi_audio.c
|
|
@@ -1766,7 +1766,7 @@ static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
|
|
/* setup private data which can be retrieved when required */
|
|
pcm->private_data = ctx;
|
|
pcm->info_flags = 0;
|
|
- strscpy(pcm->name, card->shortname, strlen(card->shortname));
|
|
+ strscpy(pcm->name, card->shortname, sizeof(pcm->name));
|
|
/* setup the ops for playback */
|
|
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &had_pcm_ops);
|
|
|
|
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
|
|
index 28e9417a5c2e35..c2ca82fc21e21a 100644
|
|
--- a/tools/bpf/bpftool/net.c
|
|
+++ b/tools/bpf/bpftool/net.c
|
|
@@ -360,17 +360,18 @@ static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
|
|
{
|
|
struct bpf_netdev_t *netinfo = cookie;
|
|
struct ifinfomsg *ifinfo = msg;
|
|
+ struct ip_devname_ifindex *tmp;
|
|
|
|
if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index)
|
|
return 0;
|
|
|
|
if (netinfo->used_len == netinfo->array_len) {
|
|
- netinfo->devices = realloc(netinfo->devices,
|
|
- (netinfo->array_len + 16) *
|
|
- sizeof(struct ip_devname_ifindex));
|
|
- if (!netinfo->devices)
|
|
+ tmp = realloc(netinfo->devices,
|
|
+ (netinfo->array_len + 16) * sizeof(struct ip_devname_ifindex));
|
|
+ if (!tmp)
|
|
return -ENOMEM;
|
|
|
|
+ netinfo->devices = tmp;
|
|
netinfo->array_len += 16;
|
|
}
|
|
netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
|
|
@@ -389,6 +390,7 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
|
|
{
|
|
struct bpf_tcinfo_t *tcinfo = cookie;
|
|
struct tcmsg *info = msg;
|
|
+ struct tc_kind_handle *tmp;
|
|
|
|
if (tcinfo->is_qdisc) {
|
|
/* skip clsact qdisc */
|
|
@@ -400,11 +402,12 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
|
|
}
|
|
|
|
if (tcinfo->used_len == tcinfo->array_len) {
|
|
- tcinfo->handle_array = realloc(tcinfo->handle_array,
|
|
+ tmp = realloc(tcinfo->handle_array,
|
|
(tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
|
|
- if (!tcinfo->handle_array)
|
|
+ if (!tmp)
|
|
return -ENOMEM;
|
|
|
|
+ tcinfo->handle_array = tmp;
|
|
tcinfo->array_len += 16;
|
|
}
|
|
tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
|
|
diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
|
|
index 8561b0f01a2476..9ef569492560ef 100644
|
|
--- a/tools/lib/subcmd/help.c
|
|
+++ b/tools/lib/subcmd/help.c
|
|
@@ -9,6 +9,7 @@
|
|
#include <sys/stat.h>
|
|
#include <unistd.h>
|
|
#include <dirent.h>
|
|
+#include <assert.h>
|
|
#include "subcmd-util.h"
|
|
#include "help.h"
|
|
#include "exec-cmd.h"
|
|
@@ -82,10 +83,11 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
|
|
ci++;
|
|
cj++;
|
|
} else {
|
|
- zfree(&cmds->names[cj]);
|
|
- cmds->names[cj++] = cmds->names[ci++];
|
|
+ cmds->names[cj++] = cmds->names[ci];
|
|
+ cmds->names[ci++] = NULL;
|
|
}
|
|
} else if (cmp == 0) {
|
|
+ zfree(&cmds->names[ci]);
|
|
ci++;
|
|
ei++;
|
|
} else if (cmp > 0) {
|
|
@@ -94,12 +96,12 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
|
|
}
|
|
if (ci != cj) {
|
|
while (ci < cmds->cnt) {
|
|
- zfree(&cmds->names[cj]);
|
|
- cmds->names[cj++] = cmds->names[ci++];
|
|
+ cmds->names[cj++] = cmds->names[ci];
|
|
+ cmds->names[ci++] = NULL;
|
|
}
|
|
}
|
|
for (ci = cj; ci < cmds->cnt; ci++)
|
|
- zfree(&cmds->names[ci]);
|
|
+ assert(cmds->names[ci] == NULL);
|
|
cmds->cnt = cj;
|
|
}
|
|
|
|
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
|
|
index f533e76fb48002..5a8617eb541957 100644
|
|
--- a/tools/perf/.gitignore
|
|
+++ b/tools/perf/.gitignore
|
|
@@ -45,7 +45,5 @@ libbpf/
|
|
libperf/
|
|
libsubcmd/
|
|
libsymbol/
|
|
-libtraceevent/
|
|
-libtraceevent_plugins/
|
|
fixdep
|
|
Documentation/doc.dep
|
|
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
|
|
index ac9d94dbbeefae..16cb7278fabaf7 100644
|
|
--- a/tools/perf/builtin-sched.c
|
|
+++ b/tools/perf/builtin-sched.c
|
|
@@ -1125,6 +1125,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
|
|
atoms->nb_atoms++;
|
|
}
|
|
|
|
+static void free_work_atoms(struct work_atoms *atoms)
|
|
+{
|
|
+ struct work_atom *atom, *tmp;
|
|
+
|
|
+ if (atoms == NULL)
|
|
+ return;
|
|
+
|
|
+ list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
|
|
+ list_del(&atom->list);
|
|
+ free(atom);
|
|
+ }
|
|
+ thread__zput(atoms->thread);
|
|
+ free(atoms);
|
|
+}
|
|
+
|
|
static int latency_switch_event(struct perf_sched *sched,
|
|
struct evsel *evsel,
|
|
struct perf_sample *sample,
|
|
@@ -1949,6 +1964,16 @@ static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
|
|
return r->last_time[cpu];
|
|
}
|
|
|
|
+static void timehist__evsel_priv_destructor(void *priv)
|
|
+{
|
|
+ struct evsel_runtime *r = priv;
|
|
+
|
|
+ if (r) {
|
|
+ free(r->last_time);
|
|
+ free(r);
|
|
+ }
|
|
+}
|
|
+
|
|
static int comm_width = 30;
|
|
|
|
static char *timehist_get_commstr(struct thread *thread)
|
|
@@ -3080,6 +3105,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
|
|
|
|
setup_pager();
|
|
|
|
+ evsel__set_priv_destructor(timehist__evsel_priv_destructor);
|
|
+
|
|
/* prefer sched_waking if it is captured */
|
|
if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
|
|
handlers[1].handler = timehist_sched_wakeup_ignore;
|
|
@@ -3180,13 +3207,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
|
|
this->total_runtime += data->total_runtime;
|
|
this->nb_atoms += data->nb_atoms;
|
|
this->total_lat += data->total_lat;
|
|
- list_splice(&data->work_list, &this->work_list);
|
|
+ list_splice_init(&data->work_list, &this->work_list);
|
|
if (this->max_lat < data->max_lat) {
this->max_lat = data->max_lat;
this->max_lat_start = data->max_lat_start;
this->max_lat_end = data->max_lat_end;
}
- zfree(&data);
+ free_work_atoms(data);
return;
}
}
@@ -3265,7 +3292,6 @@ static int perf_sched__lat(struct perf_sched *sched)
work_list = rb_entry(next, struct work_atoms, node);
output_lat_thread(sched, work_list);
next = rb_next(next);
- thread__zput(work_list->thread);
}

printf(" -----------------------------------------------------------------------------------------------------------------\n");
@@ -3279,6 +3305,13 @@ static int perf_sched__lat(struct perf_sched *sched)

rc = 0;

+ while ((next = rb_first_cached(&sched->sorted_atom_root))) {
+ struct work_atoms *data;
+
+ data = rb_entry(next, struct work_atoms, node);
+ rb_erase_cached(next, &sched->sorted_atom_root);
+ free_work_atoms(data);
+ }
out_free_cpus_switch_event:
free_cpus_switch_event(sched);
return rc;
@@ -3678,6 +3711,8 @@ int cmd_sched(int argc, const char **argv)
if (!argc)
usage_with_options(sched_usage, sched_options);

+ thread__set_priv_destructor(free);
+
/*
* Aliased to 'perf script' for now:
*/
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index 6f921db33cf90e..855b81c3326c7c 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -102,6 +102,7 @@ static int bp_accounting(int wp_cnt, int share)
fd_wp = wp_event((void *)&the_var, &attr_new);
TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1);
pr_debug("wp max created\n");
+ close(fd_wp);
}

for (i = 0; i < wp_cnt; i++)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 6d2b056232f6e1..2a6295f1ac1bc0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1460,6 +1460,15 @@ static void evsel__free_config_terms(struct evsel *evsel)
free_config_terms(&evsel->config_terms);
}

+static void (*evsel__priv_destructor)(void *priv);
+
+void evsel__set_priv_destructor(void (*destructor)(void *priv))
+{
+ assert(evsel__priv_destructor == NULL);
+
+ evsel__priv_destructor = destructor;
+}
+
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
@@ -1485,6 +1494,8 @@ void evsel__exit(struct evsel *evsel)
hashmap__free(evsel->per_pkg_mask);
evsel->per_pkg_mask = NULL;
zfree(&evsel->metric_events);
+ if (evsel__priv_destructor)
+ evsel__priv_destructor(evsel->priv);
perf_evsel__object.fini(evsel);
}

diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 848534ec74fa0a..ac396d6f95cb51 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -252,6 +252,8 @@ void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
void evsel__exit(struct evsel *evsel);
void evsel__delete(struct evsel *evsel);

+void evsel__set_priv_destructor(void (*destructor)(void *priv));
+
struct callchain_param;

void evsel__config(struct evsel *evsel, struct record_opts *opts,
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index ea24f21aafc3e3..4f0bbebcb6d62d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1366,6 +1366,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
goto out_err;
}
}
+ map__zput(new_node->map);
free(new_node);
}

diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
index 6d61992fe8a01e..c6228176dd1a0c 100644
--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
@@ -251,7 +251,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
return;
}

- ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n",
+ ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
type->name, vl);

free(new_sve);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index 84d59419e4eb5b..a6d8aa8c2a9ac9 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -894,6 +894,8 @@ static void redir_partial(int family, int sotype, int sock_map, int parser_map)
goto close;

n = xsend(c1, buf, sizeof(buf), 0);
+ if (n == -1)
+ goto close;
if (n < sizeof(buf))
FAIL("incomplete write");

diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
index b7c8f29c09a978..65916bb55dfbbf 100644
--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
@@ -14,11 +14,35 @@ fail() { #msg
exit_fail
}

+# As reading trace can last forever, simply look for 3 different
+# events then exit out of reading the file. If there's not 3 different
+# events, then the test has failed.
+check_unique() {
+ cat trace | grep -v '^#' | awk '
+ BEGIN { cnt = 0; }
+ {
+ for (i = 0; i < cnt; i++) {
+ if (event[i] == $5) {
+ break;
+ }
+ }
+ if (i == cnt) {
+ event[cnt++] = $5;
+ if (cnt > 2) {
+ exit;
+ }
+ }
+ }
+ END {
+ printf "%d", cnt;
+ }'
+}
+
echo 'sched:*' > set_event

yield

-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`check_unique`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
@@ -29,7 +53,7 @@ echo 1 > events/sched/enable

yield

-count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`check_unique`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 855505c40ed8eb..c9ba4d269c3828 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -854,6 +854,11 @@ kci_test_ipsec_offload()
sysfsf=$sysfsd/ipsec
sysfsnet=/sys/bus/netdevsim/devices/netdevsim0/net/
probed=false
+ esp4_offload_probed_default=false
+
+ if lsmod | grep -q esp4_offload; then
+ esp4_offload_probed_default=true
+ fi

# setup netdevsim since dummydev doesn't have offload support
if [ ! -w /sys/bus/netdevsim/new_device ] ; then
@@ -943,6 +948,7 @@ EOF
fi

# clean up any leftovers
+ ! "$esp4_offload_probed_default" && lsmod | grep -q esp4_offload && rmmod esp4_offload
echo 0 > /sys/bus/netdevsim/del_device
$probed && rmmod netdevsim

diff --git a/tools/testing/selftests/perf_events/.gitignore b/tools/testing/selftests/perf_events/.gitignore
index 790c47001e77e3..4858977dd55b5f 100644
--- a/tools/testing/selftests/perf_events/.gitignore
+++ b/tools/testing/selftests/perf_events/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
sigtrap_threads
remove_on_exec
+mmap
diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
index db93c4ff081a45..913854914ae499 100644
--- a/tools/testing/selftests/perf_events/Makefile
+++ b/tools/testing/selftests/perf_events/Makefile
@@ -2,5 +2,5 @@
CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
LDFLAGS += -lpthread

-TEST_GEN_PROGS := sigtrap_threads remove_on_exec
+TEST_GEN_PROGS := sigtrap_threads remove_on_exec mmap
include ../lib.mk
diff --git a/tools/testing/selftests/perf_events/mmap.c b/tools/testing/selftests/perf_events/mmap.c
new file mode 100644
index 00000000000000..ea0427aac1f98f
--- /dev/null
+++ b/tools/testing/selftests/perf_events/mmap.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE
+
+#include <dirent.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+
+#include <linux/perf_event.h>
+
+#include "../kselftest_harness.h"
+
+#define RB_SIZE 0x3000
+#define AUX_SIZE 0x10000
+#define AUX_OFFS 0x4000
+
+#define HOLE_SIZE 0x1000
+
+/* Reserve space for rb, aux with space for shrink-beyond-vma testing. */
+#define REGION_SIZE (2 * RB_SIZE + 2 * AUX_SIZE)
+#define REGION_AUX_OFFS (2 * RB_SIZE)
+
+#define MAP_BASE 1
+#define MAP_AUX 2
+
+#define EVENT_SRC_DIR "/sys/bus/event_source/devices"
+
+FIXTURE(perf_mmap)
+{
+ int fd;
+ void *ptr;
+ void *region;
+};
+
+FIXTURE_VARIANT(perf_mmap)
+{
+ bool aux;
+ unsigned long ptr_size;
+};
+
+FIXTURE_VARIANT_ADD(perf_mmap, rb)
+{
+ .aux = false,
+ .ptr_size = RB_SIZE,
+};
+
+FIXTURE_VARIANT_ADD(perf_mmap, aux)
+{
+ .aux = true,
+ .ptr_size = AUX_SIZE,
+};
+
+static bool read_event_type(struct dirent *dent, __u32 *type)
+{
+ char typefn[512];
+ FILE *fp;
+ int res;
+
+ snprintf(typefn, sizeof(typefn), "%s/%s/type", EVENT_SRC_DIR, dent->d_name);
+ fp = fopen(typefn, "r");
+ if (!fp)
+ return false;
+
+ res = fscanf(fp, "%u", type);
+ fclose(fp);
+ return res > 0;
+}
+
+FIXTURE_SETUP(perf_mmap)
+{
+ struct perf_event_attr attr = {
+ .size = sizeof(attr),
+ .disabled = 1,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ };
+ struct perf_event_attr attr_ok = {};
+ unsigned int eacces = 0, map = 0;
+ struct perf_event_mmap_page *rb;
+ struct dirent *dent;
+ void *aux, *region;
+ DIR *dir;
+
+ self->ptr = NULL;
+
+ dir = opendir(EVENT_SRC_DIR);
+ if (!dir)
+ SKIP(return, "perf not available.");
+
+ region = mmap(NULL, REGION_SIZE, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ ASSERT_NE(region, MAP_FAILED);
+ self->region = region;
+
+ // Try to find a suitable event on this system
+ while ((dent = readdir(dir))) {
+ int fd;
+
+ if (!read_event_type(dent, &attr.type))
+ continue;
+
+ fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
+ if (fd < 0) {
+ if (errno == EACCES)
+ eacces++;
+ continue;
+ }
+
+ // Check whether the event supports mmap()
+ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+ if (rb == MAP_FAILED) {
+ close(fd);
+ continue;
+ }
+
+ if (!map) {
+ // Save the event in case that no AUX capable event is found
+ attr_ok = attr;
+ map = MAP_BASE;
+ }
+
+ if (!variant->aux)
+ continue;
+
+ rb->aux_offset = AUX_OFFS;
+ rb->aux_size = AUX_SIZE;
+
+ // Check whether it supports a AUX buffer
+ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, fd, AUX_OFFS);
+ if (aux == MAP_FAILED) {
+ munmap(rb, RB_SIZE);
+ close(fd);
+ continue;
+ }
+
+ attr_ok = attr;
+ map = MAP_AUX;
+ munmap(aux, AUX_SIZE);
+ munmap(rb, RB_SIZE);
+ close(fd);
+ break;
+ }
+ closedir(dir);
+
+ if (!map) {
+ if (!eacces)
+ SKIP(return, "No mappable perf event found.");
+ else
+ SKIP(return, "No permissions for perf_event_open()");
+ }
+
+ self->fd = syscall(SYS_perf_event_open, &attr_ok, 0, -1, -1, 0);
+ ASSERT_NE(self->fd, -1);
+
+ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, self->fd, 0);
+ ASSERT_NE(rb, MAP_FAILED);
+
+ if (!variant->aux) {
+ self->ptr = rb;
+ return;
+ }
+
+ if (map != MAP_AUX)
+ SKIP(return, "No AUX event found.");
+
+ rb->aux_offset = AUX_OFFS;
+ rb->aux_size = AUX_SIZE;
+ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, self->fd, AUX_OFFS);
+ ASSERT_NE(aux, MAP_FAILED);
+ self->ptr = aux;
+}
+
+FIXTURE_TEARDOWN(perf_mmap)
+{
+ ASSERT_EQ(munmap(self->region, REGION_SIZE), 0);
+ if (self->fd != -1)
+ ASSERT_EQ(close(self->fd), 0);
+}
+
+TEST_F(perf_mmap, remap)
+{
+ void *tmp, *ptr = self->ptr;
+ unsigned long size = variant->ptr_size;
+
+ // Test the invalid remaps
+ ASSERT_EQ(mremap(ptr, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+ ASSERT_EQ(mremap(ptr + size - HOLE_SIZE, HOLE_SIZE, size, MREMAP_MAYMOVE), MAP_FAILED);
+ // Shrink the end of the mapping such that we only unmap past end of the VMA,
+ // which should succeed and poke a hole into the PROT_NONE region
+ ASSERT_NE(mremap(ptr + size - HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
+
+ // Remap the whole buffer to a new address
+ tmp = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(tmp, MAP_FAILED);
+
+ // Try splitting offset 1 hole size into VMA, this should fail
+ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size - HOLE_SIZE, size - HOLE_SIZE,
+ MREMAP_MAYMOVE | MREMAP_FIXED, tmp), MAP_FAILED);
+ // Remapping the whole thing should succeed fine
+ ptr = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tmp);
+ ASSERT_EQ(ptr, tmp);
+ ASSERT_EQ(munmap(tmp, size), 0);
+}
+
+TEST_F(perf_mmap, unmap)
+{
+ unsigned long size = variant->ptr_size;
+
+ // Try to poke holes into the mappings
+ ASSERT_NE(munmap(self->ptr, HOLE_SIZE), 0);
+ ASSERT_NE(munmap(self->ptr + HOLE_SIZE, HOLE_SIZE), 0);
+ ASSERT_NE(munmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE), 0);
+}
+
+TEST_F(perf_mmap, map)
+{
+ unsigned long size = variant->ptr_size;
+
+ // Try to poke holes into the mappings by mapping anonymous memory over it
+ ASSERT_EQ(mmap(self->ptr, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+ ASSERT_EQ(mmap(self->ptr + HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+ ASSERT_EQ(mmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
index d975a67673299f..48cf01aeec3e77 100644
--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
@@ -79,6 +79,21 @@ TEST_SIGNAL(dispatch_trigger_sigsys, SIGSYS)
}
}

+static void prctl_valid(struct __test_metadata *_metadata,
+ unsigned long op, unsigned long off,
+ unsigned long size, void *sel)
+{
+ EXPECT_EQ(0, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
+}
+
+static void prctl_invalid(struct __test_metadata *_metadata,
+ unsigned long op, unsigned long off,
+ unsigned long size, void *sel, int err)
+{
+ EXPECT_EQ(-1, prctl(PR_SET_SYSCALL_USER_DISPATCH, op, off, size, sel));
+ EXPECT_EQ(err, errno);
+}
+
TEST(bad_prctl_param)
{
char sel = SYSCALL_DISPATCH_FILTER_ALLOW;
@@ -86,57 +101,42 @@ TEST(bad_prctl_param)

/* Invalid op */
op = -1;
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0, 0, &sel);
- ASSERT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0, 0, &sel, EINVAL);

/* PR_SYS_DISPATCH_OFF */
op = PR_SYS_DISPATCH_OFF;

/* offset != 0 */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, 0);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x1, 0x0, 0, EINVAL);

/* len != 0 */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0xff, 0);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x0, 0xff, 0, EINVAL);

/* sel != NULL */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x0, 0x0, &sel, EINVAL);

/* Valid parameter */
- errno = 0;
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x0, 0x0);
- EXPECT_EQ(0, errno);
+ prctl_valid(_metadata, op, 0x0, 0x0, 0x0);

/* PR_SYS_DISPATCH_ON */
op = PR_SYS_DISPATCH_ON;

/* Dispatcher region is bad (offset > 0 && len == 0) */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x1, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, -1L, 0x0, &sel);
- EXPECT_EQ(EINVAL, errno);
+ prctl_invalid(_metadata, op, 0x1, 0x0, &sel, EINVAL);
+ prctl_invalid(_metadata, op, -1L, 0x0, &sel, EINVAL);

/* Invalid selector */
- prctl(PR_SET_SYSCALL_USER_DISPATCH, op, 0x0, 0x1, (void *) -1);
- ASSERT_EQ(EFAULT, errno);
+ prctl_invalid(_metadata, op, 0x0, 0x1, (void *) -1, EFAULT);

/*
* Dispatcher range overflows unsigned long
*/
- prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, 1, -1L, &sel);
- ASSERT_EQ(EINVAL, errno) {
- TH_LOG("Should reject bad syscall range");
- }
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_ON, 1, -1L, &sel, EINVAL);

/*
* Allowed range overflows usigned long
*/
- prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel);
- ASSERT_EQ(EINVAL, errno) {
- TH_LOG("Should reject bad syscall range");
- }
+ prctl_invalid(_metadata, PR_SYS_DISPATCH_ON, -1L, 0x1, &sel, EINVAL);
}

/*
diff --git a/tools/verification/rv/src/in_kernel.c b/tools/verification/rv/src/in_kernel.c
index f04479ecc96c0b..ced72950cb1eed 100644
--- a/tools/verification/rv/src/in_kernel.c
+++ b/tools/verification/rv/src/in_kernel.c
@@ -353,7 +353,7 @@ ikm_event_handler(struct trace_seq *s, struct tep_record *record,

if (config_has_id && (config_my_pid == id))
return 0;
- else if (config_my_pid && (config_my_pid == pid))
+ else if (config_my_pid == pid)
return 0;

tep_print_event(trace_event->tep, s, record, "%16s-%-8d ", TEP_PRINT_COMM, TEP_PRINT_PID);
@@ -595,7 +595,7 @@ static int parse_arguments(char *monitor_name, int argc, char **argv)
config_reactor = optarg;
break;
case 's':
- config_my_pid = 0;
+ config_my_pid = -1;
break;
case 't':
config_trace = 1;