mirror of https://github.com/armbian/build, synced 2025-09-24 19:47:06 +07:00
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index 63fd4e6a014bc..8b738855e1c5a 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -3003,10 +3003,10 @@
65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
...
127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
- 128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
- 129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+ 192 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+ 193 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
...
- 159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+ 223 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
232 char Biometric Devices
0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
index 38dc56a577604..ecec514b31550 100644
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
@@ -43,19 +43,19 @@ group emmc_nb
group pwm0
- pin 11 (GPIO1-11)
- - functions pwm, gpio
+ - functions pwm, led, gpio
group pwm1
- pin 12
- - functions pwm, gpio
+ - functions pwm, led, gpio
group pwm2
- pin 13
- - functions pwm, gpio
+ - functions pwm, led, gpio
group pwm3
- pin 14
- - functions pwm, gpio
+ - functions pwm, led, gpio
group pmic1
- pin 7
diff --git a/Makefile b/Makefile
index 8b1f1e7517b94..a47273ecfdf21 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
-SUBLEVEL = 66
+SUBLEVEL = 67
EXTRAVERSION =
NAME = Dare mighty things
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 0d6ee56f5831e..175213d7a1aa1 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -84,6 +84,8 @@ compress-$(CONFIG_KERNEL_LZ4) = lz4
libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o
ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
+CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
+CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
OBJS += $(libfdt_objs) atags_to_fdt.o
endif
diff --git a/arch/arm/boot/dts/at91-kizbox3_common.dtsi b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
index 7c3076e245efa..dc77d8e80e567 100644
--- a/arch/arm/boot/dts/at91-kizbox3_common.dtsi
+++ b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
@@ -336,7 +336,7 @@
};
&shutdown_controller {
- atmel,shdwc-debouncer = <976>;
+ debounce-delay-us = <976>;
atmel,wakeup-rtc-timer;
input@0 {
diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
index ebbc9b23aef1c..b1068cca42287 100644
--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
@@ -662,7 +662,7 @@
};
&shutdown_controller {
- atmel,shdwc-debouncer = <976>;
+ debounce-delay-us = <976>;
status = "okay";
input@0 {
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index d3cd2443ba252..9a18453d78428 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -138,7 +138,7 @@
};
shdwc@f8048010 {
- atmel,shdwc-debouncer = <976>;
+ debounce-delay-us = <976>;
atmel,wakeup-rtc-timer;
input@0 {
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
index 4883b84b4eded..20bcb7480d2ea 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -205,7 +205,7 @@
};
&shutdown_controller {
- atmel,shdwc-debouncer = <976>;
+ debounce-delay-us = <976>;
atmel,wakeup-rtc-timer;
input@0 {
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 19bb50f50c1fc..308d472bd1044 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -693,7 +693,7 @@
};
&shutdown_controller {
- atmel,shdwc-debouncer = <976>;
+ debounce-delay-us = <976>;
atmel,wakeup-rtc-timer;
input@0 {
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index 1c6361ba1aca4..317c6ddb56775 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -203,7 +203,7 @@
};
shdwc@f8048010 {
- atmel,shdwc-debouncer = <976>;
+ debounce-delay-us = <976>;
input@0 {
reg = <0>;
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index d767968ae2175..08c5182ba86bd 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -347,7 +347,7 @@
};
shdwc@f8048010 {
- atmel,shdwc-debouncer = <976>;
+ debounce-delay-us = <976>;
atmel,wakeup-rtc-timer;
input@0 {
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index f7dcdf96e5c00..6d9a5ede94aaf 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -70,6 +70,12 @@
clock-frequency = <11289600>;
};
+ achc_24M: achc-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
sgtlsound: sound {
compatible = "fsl,imx53-cpuvo-sgtl5000",
"fsl,imx-audio-sgtl5000";
@@ -313,16 +319,13 @@
&gpio4 12 GPIO_ACTIVE_LOW>;
status = "okay";
- spidev0: spi@0 {
- compatible = "ge,achc";
- reg = <0>;
- spi-max-frequency = <1000000>;
- };
-
- spidev1: spi@1 {
- compatible = "ge,achc";
- reg = <1>;
- spi-max-frequency = <1000000>;
+ spidev0: spi@1 {
+ compatible = "ge,achc", "nxp,kinetis-k20";
+ reg = <1>, <0>;
+ vdd-supply = <®_3v3>;
+ vdda-supply = <®_3v3>;
+ clocks = <&achc_24M>;
+ reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
};
gpioxra0: gpio@2 {
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 2687c4e890ba8..e36d590e83732 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -1262,9 +1262,9 @@
<&mmcc DSI1_BYTE_CLK>,
<&mmcc DSI_PIXEL_CLK>,
<&mmcc DSI1_ESC_CLK>;
- clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
- "src_clk", "byte_clk", "pixel_clk",
- "core_clk";
+ clock-names = "iface", "bus", "core_mmss",
+ "src", "byte", "pixel",
+ "core";
assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
<&mmcc DSI1_ESC_SRC>,
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
index 633079245601b..fd0cd10cb0931 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
@@ -172,15 +172,15 @@
sgtl5000_tx_endpoint: endpoint@0 {
reg = <0>;
remote-endpoint = <&sai2a_endpoint>;
- frame-master;
- bitclock-master;
+ frame-master = <&sgtl5000_tx_endpoint>;
+ bitclock-master = <&sgtl5000_tx_endpoint>;
};
sgtl5000_rx_endpoint: endpoint@1 {
reg = <1>;
remote-endpoint = <&sai2b_endpoint>;
- frame-master;
- bitclock-master;
+ frame-master = <&sgtl5000_rx_endpoint>;
+ bitclock-master = <&sgtl5000_rx_endpoint>;
};
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
index ec02cee1dd9b0..944d38b85eef4 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
@@ -185,8 +185,8 @@
&i2c4 {
hdmi-transmitter@3d {
compatible = "adi,adv7513";
- reg = <0x3d>, <0x2d>, <0x4d>, <0x5d>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>, <0x4d>, <0x2d>, <0x5d>;
+ reg-names = "main", "edid", "cec", "packet";
clocks = <&cec_clock>;
clock-names = "cec";
@@ -204,8 +204,6 @@
adi,input-depth = <8>;
adi,input-colorspace = "rgb";
adi,input-clock = "1x";
- adi,input-style = <1>;
- adi,input-justification = "evenly";
ports {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
index 93398cfae97ee..47df8ac67cf1a 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
@@ -212,15 +212,15 @@
cs42l51_tx_endpoint: endpoint@0 {
reg = <0>;
remote-endpoint = <&sai2a_endpoint>;
- frame-master;
- bitclock-master;
+ frame-master = <&cs42l51_tx_endpoint>;
+ bitclock-master = <&cs42l51_tx_endpoint>;
};
cs42l51_rx_endpoint: endpoint@1 {
reg = <1>;
remote-endpoint = <&sai2b_endpoint>;
- frame-master;
- bitclock-master;
+ frame-master = <&cs42l51_rx_endpoint>;
+ bitclock-master = <&cs42l51_rx_endpoint>;
};
};
};
diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
index 5d0f0fbba1d2e..5dbfb83c1b06b 100644
--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
@@ -704,7 +704,6 @@
nvidia,xcvr-setup-use-fuses;
nvidia,xcvr-lsfslew = <2>;
nvidia,xcvr-lsrslew = <2>;
- vbus-supply = <&vdd_vbus1>;
};
usb@c5008000 {
@@ -716,7 +715,7 @@
nvidia,xcvr-setup-use-fuses;
nvidia,xcvr-lsfslew = <2>;
nvidia,xcvr-lsrslew = <2>;
- vbus-supply = <&vdd_vbus3>;
+ vbus-supply = <&vdd_5v0_sys>;
};
brcm_wifi_pwrseq: wifi-pwrseq {
@@ -967,28 +966,6 @@
vin-supply = <&vdd_5v0_sys>;
};
- vdd_vbus1: regulator@4 {
- compatible = "regulator-fixed";
- regulator-name = "vdd_usb1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- gpio = <&gpio TEGRA_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_5v0_sys>;
- };
-
- vdd_vbus3: regulator@5 {
- compatible = "regulator-fixed";
- regulator-name = "vdd_usb3_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-always-on;
- gpio = <&gpio TEGRA_GPIO(D, 3) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_5v0_sys>;
- };
-
sound {
|
|
compatible = "nvidia,tegra-audio-wm8903-picasso",
|
|
"nvidia,tegra-audio-wm8903";
|
|
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
|
|
index 95e6bccdb4f6e..dd4d506683de7 100644
|
|
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
|
|
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
|
|
@@ -185,8 +185,9 @@
|
|
nvidia,pins = "ata", "atb", "atc", "atd", "ate",
|
|
"cdev1", "cdev2", "dap1", "dtb", "gma",
|
|
"gmb", "gmc", "gmd", "gme", "gpu7",
|
|
- "gpv", "i2cp", "pta", "rm", "slxa",
|
|
- "slxk", "spia", "spib", "uac";
|
|
+ "gpv", "i2cp", "irrx", "irtx", "pta",
|
|
+ "rm", "slxa", "slxk", "spia", "spib",
|
|
+ "uac";
|
|
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
|
|
nvidia,tristate = <TEGRA_PIN_DISABLE>;
|
|
};
|
|
@@ -211,7 +212,7 @@
|
|
conf_ddc {
|
|
nvidia,pins = "ddc", "dta", "dtd", "kbca",
|
|
"kbcb", "kbcc", "kbcd", "kbce", "kbcf",
|
|
- "sdc";
|
|
+ "sdc", "uad", "uca";
|
|
nvidia,pull = <TEGRA_PIN_PULL_UP>;
|
|
nvidia,tristate = <TEGRA_PIN_DISABLE>;
|
|
};
|
|
@@ -221,10 +222,9 @@
|
|
"lvp0", "owc", "sdb";
|
|
nvidia,tristate = <TEGRA_PIN_ENABLE>;
|
|
};
|
|
- conf_irrx {
|
|
- nvidia,pins = "irrx", "irtx", "sdd", "spic",
|
|
- "spie", "spih", "uaa", "uab", "uad",
|
|
- "uca", "ucb";
|
|
+ conf_sdd {
|
|
+ nvidia,pins = "sdd", "spic", "spie", "spih",
|
|
+ "uaa", "uab", "ucb";
|
|
nvidia,pull = <TEGRA_PIN_PULL_UP>;
|
|
nvidia,tristate = <TEGRA_PIN_ENABLE>;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
|
|
index be81330db14f6..02641191682e0 100644
|
|
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
|
|
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
|
|
@@ -32,14 +32,14 @@
|
|
};
|
|
};
|
|
|
|
- reg_vcc3v3: vcc3v3 {
|
|
+ reg_vcc3v3: regulator-vcc3v3 {
|
|
compatible = "regulator-fixed";
|
|
regulator-name = "vcc3v3";
|
|
regulator-min-microvolt = <3300000>;
|
|
regulator-max-microvolt = <3300000>;
|
|
};
|
|
|
|
- reg_vdd_cpu_gpu: vdd-cpu-gpu {
|
|
+ reg_vdd_cpu_gpu: regulator-vdd-cpu-gpu {
|
|
compatible = "regulator-fixed";
|
|
regulator-name = "vdd-cpu-gpu";
|
|
regulator-min-microvolt = <1135000>;
|
|
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
|
|
index db3d303093f61..6d22efbd645cb 100644
|
|
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
|
|
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
|
|
@@ -83,15 +83,9 @@
|
|
};
|
|
|
|
eeprom@52 {
|
|
- compatible = "atmel,24c512";
|
|
+ compatible = "onnn,cat24c04", "atmel,24c04";
|
|
reg = <0x52>;
|
|
};
|
|
-
|
|
- eeprom@53 {
|
|
- compatible = "atmel,24c512";
|
|
- reg = <0x53>;
|
|
- };
|
|
-
|
|
};
|
|
};
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
|
|
index d53ccc56bb639..07139e35686d7 100644
|
|
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
|
|
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
|
|
@@ -58,14 +58,9 @@
|
|
};
|
|
|
|
eeprom@52 {
|
|
- compatible = "atmel,24c512";
|
|
+ compatible = "onnn,cat24c05", "atmel,24c04";
|
|
reg = <0x52>;
|
|
};
|
|
-
|
|
- eeprom@53 {
|
|
- compatible = "atmel,24c512";
|
|
- reg = <0x53>;
|
|
- };
|
|
};
|
|
|
|
&i2c3 {
|
|
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
|
|
index e40281510c0c0..b14e9f3bfdbdc 100644
|
|
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
|
|
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
|
|
@@ -1215,13 +1215,13 @@
|
|
|
|
cpu@0 {
|
|
device_type = "cpu";
|
|
- compatible = "nvidia,denver";
|
|
+ compatible = "nvidia,tegra132-denver";
|
|
reg = <0>;
|
|
};
|
|
|
|
cpu@1 {
|
|
device_type = "cpu";
|
|
- compatible = "nvidia,denver";
|
|
+ compatible = "nvidia,tegra132-denver";
|
|
reg = <1>;
|
|
};
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
|
|
index 6946fb210e484..9b5007e5f790f 100644
|
|
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
|
|
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
|
|
@@ -1976,7 +1976,7 @@
|
|
};
|
|
|
|
pcie_ep@14160000 {
|
|
- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
|
|
+ compatible = "nvidia,tegra194-pcie-ep";
|
|
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
|
|
reg = <0x00 0x14160000 0x0 0x00020000>, /* appl registers (128K) */
|
|
<0x00 0x36040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
|
|
@@ -2008,7 +2008,7 @@
|
|
};
|
|
|
|
pcie_ep@14180000 {
|
|
- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
|
|
+ compatible = "nvidia,tegra194-pcie-ep";
|
|
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
|
|
reg = <0x00 0x14180000 0x0 0x00020000>, /* appl registers (128K) */
|
|
<0x00 0x38040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
|
|
@@ -2040,7 +2040,7 @@
|
|
};
|
|
|
|
pcie_ep@141a0000 {
|
|
- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
|
|
+ compatible = "nvidia,tegra194-pcie-ep";
|
|
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
|
|
reg = <0x00 0x141a0000 0x0 0x00020000>, /* appl registers (128K) */
|
|
<0x00 0x3a040000 0x0 0x00040000>, /* iATU_DMA reg space (256K) */
|
|
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
|
|
index cdc1e3d60c58e..3ceb36cac512f 100644
|
|
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
|
|
@@ -151,7 +151,7 @@
|
|
#size-cells = <2>;
|
|
ranges;
|
|
|
|
- rpm_msg_ram: memory@0x60000 {
|
|
+ rpm_msg_ram: memory@60000 {
|
|
reg = <0x0 0x60000 0x0 0x6000>;
|
|
no-map;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
|
|
index e8c37a1693d3b..cc08dc4eb56a5 100644
|
|
--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
|
|
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
|
|
@@ -20,7 +20,7 @@
|
|
stdout-path = "serial0";
|
|
};
|
|
|
|
- memory {
|
|
+ memory@40000000 {
|
|
device_type = "memory";
|
|
reg = <0x0 0x40000000 0x0 0x20000000>;
|
|
};
|
|
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
|
|
index 829e37ac82f66..776a6b0f61a62 100644
|
|
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
|
|
@@ -567,10 +567,10 @@
|
|
|
|
pcie1: pci@10000000 {
|
|
compatible = "qcom,pcie-ipq8074";
|
|
- reg = <0x10000000 0xf1d
|
|
- 0x10000f20 0xa8
|
|
- 0x00088000 0x2000
|
|
- 0x10100000 0x1000>;
|
|
+ reg = <0x10000000 0xf1d>,
|
|
+ <0x10000f20 0xa8>,
|
|
+ <0x00088000 0x2000>,
|
|
+ <0x10100000 0x1000>;
|
|
reg-names = "dbi", "elbi", "parf", "config";
|
|
device_type = "pci";
|
|
linux,pci-domain = <1>;
|
|
@@ -629,10 +629,10 @@
|
|
|
|
pcie0: pci@20000000 {
|
|
compatible = "qcom,pcie-ipq8074";
|
|
- reg = <0x20000000 0xf1d
|
|
- 0x20000f20 0xa8
|
|
- 0x00080000 0x2000
|
|
- 0x20100000 0x1000>;
|
|
+ reg = <0x20000000 0xf1d>,
|
|
+ <0x20000f20 0xa8>,
|
|
+ <0x00080000 0x2000>,
|
|
+ <0x20100000 0x1000>;
|
|
reg-names = "dbi", "elbi", "parf", "config";
|
|
device_type = "pci";
|
|
linux,pci-domain = <0>;
|
|
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
|
|
index 6707f898607fe..45f9a44326a6d 100644
|
|
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
|
|
@@ -14,16 +14,18 @@
|
|
chosen { };
|
|
|
|
clocks {
|
|
- xo_board: xo_board {
|
|
+ xo_board: xo-board {
|
|
compatible = "fixed-clock";
|
|
#clock-cells = <0>;
|
|
clock-frequency = <19200000>;
|
|
+ clock-output-names = "xo_board";
|
|
};
|
|
|
|
- sleep_clk: sleep_clk {
|
|
+ sleep_clk: sleep-clk {
|
|
compatible = "fixed-clock";
|
|
#clock-cells = <0>;
|
|
clock-frequency = <32768>;
|
|
+ clock-output-names = "sleep_clk";
|
|
};
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
|
|
index fd6ae5464dea4..eef17434d12ae 100644
|
|
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
|
|
@@ -17,14 +17,14 @@
|
|
chosen { };
|
|
|
|
clocks {
|
|
- xo_board: xo_board {
|
|
+ xo_board: xo-board {
|
|
compatible = "fixed-clock";
|
|
#clock-cells = <0>;
|
|
clock-frequency = <19200000>;
|
|
clock-output-names = "xo_board";
|
|
};
|
|
|
|
- sleep_clk: sleep_clk {
|
|
+ sleep_clk: sleep-clk {
|
|
compatible = "fixed-clock";
|
|
#clock-cells = <0>;
|
|
clock-frequency = <32764>;
|
|
diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
|
|
index deb928d303c22..f87054575ce7f 100644
|
|
--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
|
|
@@ -17,14 +17,14 @@
|
|
chosen { };
|
|
|
|
clocks {
|
|
- xo_board: xo_board {
|
|
+ xo_board: xo-board {
|
|
compatible = "fixed-clock";
|
|
#clock-cells = <0>;
|
|
clock-frequency = <19200000>;
|
|
clock-output-names = "xo_board";
|
|
};
|
|
|
|
- sleep_clk: sleep_clk {
|
|
+ sleep_clk: sleep-clk {
|
|
compatible = "fixed-clock";
|
|
#clock-cells = <0>;
|
|
clock-frequency = <32764>;
|
|
@@ -343,10 +343,19 @@
|
|
};
|
|
|
|
qhee_code: qhee-code@85800000 {
|
|
- reg = <0x0 0x85800000 0x0 0x3700000>;
|
|
+ reg = <0x0 0x85800000 0x0 0x600000>;
|
|
no-map;
|
|
};
|
|
|
|
+ rmtfs_mem: memory@85e00000 {
|
|
+ compatible = "qcom,rmtfs-mem";
|
|
+ reg = <0x0 0x85e00000 0x0 0x200000>;
|
|
+ no-map;
|
|
+
|
|
+ qcom,client-id = <1>;
|
|
+ qcom,vmid = <15>;
|
|
+ };
|
|
+
|
|
smem_region: smem-mem@86000000 {
|
|
reg = <0 0x86000000 0 0x200000>;
|
|
no-map;
|
|
@@ -357,58 +366,44 @@
|
|
no-map;
|
|
};
|
|
|
|
- modem_fw_mem: modem-fw-region@8ac00000 {
|
|
+ mpss_region: mpss@8ac00000 {
|
|
reg = <0x0 0x8ac00000 0x0 0x7e00000>;
|
|
no-map;
|
|
};
|
|
|
|
- adsp_fw_mem: adsp-fw-region@92a00000 {
|
|
+ adsp_region: adsp@92a00000 {
|
|
reg = <0x0 0x92a00000 0x0 0x1e00000>;
|
|
no-map;
|
|
};
|
|
|
|
- pil_mba_mem: pil-mba-region@94800000 {
|
|
+ mba_region: mba@94800000 {
|
|
reg = <0x0 0x94800000 0x0 0x200000>;
|
|
no-map;
|
|
};
|
|
|
|
- buffer_mem: buffer-region@94a00000 {
|
|
+ buffer_mem: tzbuffer@94a00000 {
|
|
reg = <0x0 0x94a00000 0x0 0x100000>;
|
|
no-map;
|
|
};
|
|
|
|
- venus_fw_mem: venus-fw-region@9f800000 {
|
|
+ venus_region: venus@9f800000 {
|
|
reg = <0x0 0x9f800000 0x0 0x800000>;
|
|
no-map;
|
|
};
|
|
|
|
- secure_region2: secure-region2@f7c00000 {
|
|
- reg = <0x0 0xf7c00000 0x0 0x5c00000>;
|
|
- no-map;
|
|
- };
|
|
-
|
|
adsp_mem: adsp-region@f6000000 {
|
|
reg = <0x0 0xf6000000 0x0 0x800000>;
|
|
no-map;
|
|
};
|
|
|
|
- qseecom_ta_mem: qseecom-ta-region@fec00000 {
|
|
- reg = <0x0 0xfec00000 0x0 0x1000000>;
|
|
- no-map;
|
|
- };
|
|
-
|
|
qseecom_mem: qseecom-region@f6800000 {
|
|
reg = <0x0 0xf6800000 0x0 0x1400000>;
|
|
no-map;
|
|
};
|
|
|
|
- secure_display_memory: secure-region@f5c00000 {
|
|
- reg = <0x0 0xf5c00000 0x0 0x5c00000>;
|
|
- no-map;
|
|
- };
|
|
-
|
|
- cont_splash_mem: cont-splash-region@9d400000 {
|
|
- reg = <0x0 0x9d400000 0x0 0x23ff000>;
|
|
+ zap_shader_region: gpu@fed00000 {
|
|
+ compatible = "shared-dma-pool";
|
|
+ reg = <0x0 0xfed00000 0x0 0xa00000>;
|
|
no-map;
|
|
};
|
|
};
|
|
@@ -527,14 +522,18 @@
|
|
reg = <0x01f40000 0x20000>;
|
|
};
|
|
|
|
- tlmm: pinctrl@3000000 {
|
|
+ tlmm: pinctrl@3100000 {
|
|
compatible = "qcom,sdm630-pinctrl";
|
|
- reg = <0x03000000 0xc00000>;
|
|
+ reg = <0x03100000 0x400000>,
|
|
+ <0x03500000 0x400000>,
|
|
+ <0x03900000 0x400000>;
|
|
+ reg-names = "south", "center", "north";
|
|
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
|
|
gpio-controller;
|
|
- #gpio-cells = <0x2>;
|
|
+ gpio-ranges = <&tlmm 0 0 114>;
|
|
+ #gpio-cells = <2>;
|
|
interrupt-controller;
|
|
- #interrupt-cells = <0x2>;
|
|
+ #interrupt-cells = <2>;
|
|
|
|
blsp1_uart1_default: blsp1-uart1-default {
|
|
pins = "gpio0", "gpio1", "gpio2", "gpio3";
|
|
@@ -554,40 +553,48 @@
|
|
bias-disable;
|
|
};
|
|
|
|
- blsp2_uart1_tx_active: blsp2-uart1-tx-active {
|
|
- pins = "gpio16";
|
|
- drive-strength = <2>;
|
|
- bias-disable;
|
|
- };
|
|
-
|
|
- blsp2_uart1_tx_sleep: blsp2-uart1-tx-sleep {
|
|
- pins = "gpio16";
|
|
- drive-strength = <2>;
|
|
- bias-pull-up;
|
|
- };
|
|
+ blsp2_uart1_default: blsp2-uart1-active {
|
|
+ tx-rts {
|
|
+ pins = "gpio16", "gpio19";
|
|
+ function = "blsp_uart5";
|
|
+ drive-strength = <2>;
|
|
+ bias-disable;
|
|
+ };
|
|
|
|
- blsp2_uart1_rxcts_active: blsp2-uart1-rxcts-active {
|
|
- pins = "gpio17", "gpio18";
|
|
- drive-strength = <2>;
|
|
- bias-disable;
|
|
- };
|
|
+ rx {
|
|
+ /*
|
|
+ * Avoid garbage data while BT module
|
|
+ * is powered off or not driving signal
|
|
+ */
|
|
+ pins = "gpio17";
|
|
+ function = "blsp_uart5";
|
|
+ drive-strength = <2>;
|
|
+ bias-pull-up;
|
|
+ };
|
|
|
|
- blsp2_uart1_rxcts_sleep: blsp2-uart1-rxcts-sleep {
|
|
- pins = "gpio17", "gpio18";
|
|
- drive-strength = <2>;
|
|
- bias-no-pull;
|
|
+ cts {
|
|
+ /* Match the pull of the BT module */
|
|
+ pins = "gpio18";
|
|
+ function = "blsp_uart5";
|
|
+ drive-strength = <2>;
|
|
+ bias-pull-down;
|
|
+ };
|
|
};
|
|
|
|
- blsp2_uart1_rfr_active: blsp2-uart1-rfr-active {
|
|
- pins = "gpio19";
|
|
- drive-strength = <2>;
|
|
- bias-disable;
|
|
- };
|
|
+ blsp2_uart1_sleep: blsp2-uart1-sleep {
|
|
+ tx {
|
|
+ pins = "gpio16";
|
|
+ function = "gpio";
|
|
+ drive-strength = <2>;
|
|
+ bias-pull-up;
|
|
+ };
|
|
|
|
- blsp2_uart1_rfr_sleep: blsp2-uart1-rfr-sleep {
|
|
- pins = "gpio19";
|
|
- drive-strength = <2>;
|
|
- bias-no-pull;
|
|
+ rx-cts-rts {
|
|
+ pins = "gpio17", "gpio18", "gpio19";
|
|
+ function = "gpio";
|
|
+ drive-strength = <2>;
|
|
+ bias-no-pull;
|
|
+ };
|
|
};
|
|
|
|
i2c1_default: i2c1-default {
|
|
@@ -686,50 +693,106 @@
|
|
bias-pull-up;
|
|
};
|
|
|
|
- sdc1_clk_on: sdc1-clk-on {
|
|
- pins = "sdc1_clk";
|
|
- bias-disable;
|
|
- drive-strength = <16>;
|
|
- };
|
|
+ sdc1_state_on: sdc1-on {
|
|
+ clk {
|
|
+ pins = "sdc1_clk";
|
|
+ bias-disable;
|
|
+ drive-strength = <16>;
|
|
+ };
|
|
|
|
- sdc1_clk_off: sdc1-clk-off {
|
|
- pins = "sdc1_clk";
|
|
- bias-disable;
|
|
- drive-strength = <2>;
|
|
- };
|
|
+ cmd {
|
|
+ pins = "sdc1_cmd";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <10>;
|
|
+ };
|
|
|
|
- sdc1_cmd_on: sdc1-cmd-on {
|
|
- pins = "sdc1_cmd";
|
|
- bias-pull-up;
|
|
- drive-strength = <10>;
|
|
- };
|
|
+ data {
|
|
+ pins = "sdc1_data";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <10>;
|
|
+ };
|
|
|
|
- sdc1_cmd_off: sdc1-cmd-off {
|
|
- pins = "sdc1_cmd";
|
|
- bias-pull-up;
|
|
- drive-strength = <2>;
|
|
+ rclk {
|
|
+ pins = "sdc1_rclk";
|
|
+ bias-pull-down;
|
|
+ };
|
|
};
|
|
|
|
- sdc1_data_on: sdc1-data-on {
|
|
- pins = "sdc1_data";
|
|
- bias-pull-up;
|
|
- drive-strength = <8>;
|
|
- };
|
|
+ sdc1_state_off: sdc1-off {
|
|
+ clk {
|
|
+ pins = "sdc1_clk";
|
|
+ bias-disable;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
|
|
- sdc1_data_off: sdc1-data-off {
|
|
- pins = "sdc1_data";
|
|
- bias-pull-up;
|
|
- drive-strength = <2>;
|
|
+ cmd {
|
|
+ pins = "sdc1_cmd";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
+
|
|
+ data {
|
|
+ pins = "sdc1_data";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
+
|
|
+ rclk {
|
|
+ pins = "sdc1_rclk";
|
|
+ bias-pull-down;
|
|
+ };
|
|
};
|
|
|
|
- sdc1_rclk_on: sdc1-rclk-on {
|
|
- pins = "sdc1_rclk";
|
|
- bias-pull-down;
|
|
+ sdc2_state_on: sdc2-on {
|
|
+ clk {
|
|
+ pins = "sdc2_clk";
|
|
+ bias-disable;
|
|
+ drive-strength = <16>;
|
|
+ };
|
|
+
|
|
+ cmd {
|
|
+ pins = "sdc2_cmd";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <10>;
|
|
+ };
|
|
+
|
|
+ data {
|
|
+ pins = "sdc2_data";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <10>;
|
|
+ };
|
|
+
|
|
+ sd-cd {
|
|
+ pins = "gpio54";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
};
|
|
|
|
- sdc1_rclk_off: sdc1-rclk-off {
|
|
- pins = "sdc1_rclk";
|
|
- bias-pull-down;
|
|
+ sdc2_state_off: sdc2-off {
|
|
+ clk {
|
|
+ pins = "sdc2_clk";
|
|
+ bias-disable;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
+
|
|
+ cmd {
|
|
+ pins = "sdc2_cmd";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
+
|
|
+ data {
|
|
+ pins = "sdc2_data";
|
|
+ bias-pull-up;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
+
|
|
+ sd-cd {
|
|
+ pins = "gpio54";
|
|
+ bias-disable;
|
|
+ drive-strength = <2>;
|
|
+ };
|
|
};
|
|
};
|
|
|
|
@@ -821,8 +884,8 @@
|
|
clock-names = "core", "iface", "xo";
|
|
|
|
pinctrl-names = "default", "sleep";
|
|
- pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
|
|
- pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
|
|
+ pinctrl-0 = <&sdc1_state_on>;
|
|
+ pinctrl-1 = <&sdc1_state_off>;
|
|
|
|
bus-width = <8>;
|
|
non-removable;
|
|
@@ -967,10 +1030,8 @@
|
|
dmas = <&blsp2_dma 0>, <&blsp2_dma 1>;
|
|
dma-names = "tx", "rx";
|
|
pinctrl-names = "default", "sleep";
|
|
- pinctrl-0 = <&blsp2_uart1_tx_active &blsp2_uart1_rxcts_active
|
|
- &blsp2_uart1_rfr_active>;
|
|
- pinctrl-1 = <&blsp2_uart1_tx_sleep &blsp2_uart1_rxcts_sleep
|
|
- &blsp2_uart1_rfr_sleep>;
|
|
+ pinctrl-0 = <&blsp2_uart1_default>;
|
|
+ pinctrl-1 = <&blsp2_uart1_sleep>;
|
|
status = "disabled";
|
|
};
|
|
|
|
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
|
|
index d4547a192748b..ec356fe07ac8a 100644
|
|
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
|
|
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
|
|
@@ -2346,7 +2346,7 @@
|
|
};
|
|
};
|
|
|
|
- epss_l3: interconnect@18591000 {
|
|
+ epss_l3: interconnect@18590000 {
|
|
compatible = "qcom,sm8250-epss-l3";
|
|
reg = <0 0x18590000 0 0x1000>;
|
|
|
|
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
|
|
index 587c504a4c8b2..4b06cf9a8c8aa 100644
|
|
--- a/arch/arm64/include/asm/kernel-pgtable.h
|
|
+++ b/arch/arm64/include/asm/kernel-pgtable.h
|
|
@@ -65,8 +65,8 @@
|
|
#define EARLY_KASLR (0)
|
|
#endif
|
|
|
|
-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
|
|
- - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
|
|
+#define EARLY_ENTRIES(vstart, vend, shift) \
|
|
+ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
|
|
|
|
#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
|
|
|
|
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
|
|
index b2e91c187e2a6..c7315862e2435 100644
|
|
--- a/arch/arm64/include/asm/mmu.h
|
|
+++ b/arch/arm64/include/asm/mmu.h
|
|
@@ -30,11 +30,32 @@ typedef struct {
|
|
} mm_context_t;
|
|
|
|
/*
|
|
- * This macro is only used by the TLBI and low-level switch_mm() code,
|
|
- * neither of which can race with an ASID change. We therefore don't
|
|
- * need to reload the counter using atomic64_read().
|
|
+ * We use atomic64_read() here because the ASID for an 'mm_struct' can
|
|
+ * be reallocated when scheduling one of its threads following a
|
|
+ * rollover event (see new_context() and flush_context()). In this case,
|
|
+ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
|
|
+ * may use a stale ASID. This is fine in principle as the new ASID is
|
|
+ * guaranteed to be clean in the TLB, but the TLBI routines have to take
|
|
+ * care to handle the following race:
|
|
+ *
|
|
+ * CPU 0 CPU 1 CPU 2
|
|
+ *
|
|
+ * // ptep_clear_flush(mm)
|
|
+ * xchg_relaxed(pte, 0)
|
|
+ * DSB ISHST
|
|
+ * old = ASID(mm)
|
|
+ * | <rollover>
|
|
+ * | new = new_context(mm)
|
|
+ * \-----------------> atomic_set(mm->context.id, new)
|
|
+ * cpu_switch_mm(mm)
|
|
+ * // Hardware walk of pte using new ASID
|
|
+ * TLBI(old)
|
|
+ *
|
|
+ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
|
|
+ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
|
|
+ * written by CPU 0.
|
|
*/
|
|
-#define ASID(mm) ((mm)->context.id.counter & 0xffff)
|
|
+#define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff)
|
|
|
|
static inline bool arm64_kernel_unmapped_at_el0(void)
|
|
{
|
|
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
|
|
index cc3f5a33ff9c5..36f02892e1df8 100644
|
|
--- a/arch/arm64/include/asm/tlbflush.h
|
|
+++ b/arch/arm64/include/asm/tlbflush.h
|
|
@@ -245,9 +245,10 @@ static inline void flush_tlb_all(void)
|
|
|
|
static inline void flush_tlb_mm(struct mm_struct *mm)
|
|
{
|
|
- unsigned long asid = __TLBI_VADDR(0, ASID(mm));
|
|
+ unsigned long asid;
|
|
|
|
dsb(ishst);
|
|
+ asid = __TLBI_VADDR(0, ASID(mm));
|
|
__tlbi(aside1is, asid);
|
|
__tlbi_user(aside1is, asid);
|
|
dsb(ish);
|
|
@@ -256,9 +257,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
|
|
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
|
|
unsigned long uaddr)
|
|
{
|
|
- unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
|
|
+ unsigned long addr;
|
|
|
|
dsb(ishst);
|
|
+ addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
|
|
__tlbi(vale1is, addr);
|
|
__tlbi_user(vale1is, addr);
|
|
}
|
|
@@ -283,9 +285,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
|
|
{
|
|
int num = 0;
|
|
int scale = 0;
|
|
- unsigned long asid = ASID(vma->vm_mm);
|
|
- unsigned long addr;
|
|
- unsigned long pages;
|
|
+ unsigned long asid, addr, pages;
|
|
|
|
start = round_down(start, stride);
|
|
end = round_up(end, stride);
|
|
@@ -305,6 +305,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
|
|
}
|
|
|
|
dsb(ishst);
|
|
+ asid = ASID(vma->vm_mm);
|
|
|
|
/*
|
|
* When the CPU does not support TLB range operations, flush the TLB
|
|
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
|
|
index 78cdd6b24172c..f9119eea735e2 100644
|
|
--- a/arch/arm64/kernel/head.S
|
|
+++ b/arch/arm64/kernel/head.S
|
|
@@ -191,7 +191,7 @@ SYM_CODE_END(preserve_boot_args)
|
|
* to be composed of multiple pages. (This effectively scales the end index).
|
|
*
|
|
* vstart: virtual address of start of range
|
|
- * vend: virtual address of end of range
|
|
+ * vend: virtual address of end of range - we map [vstart, vend]
|
|
* shift: shift used to transform virtual address into index
|
|
* ptrs: number of entries in page table
|
|
* istart: index in table corresponding to vstart
|
|
@@ -228,17 +228,18 @@ SYM_CODE_END(preserve_boot_args)
|
|
*
|
|
* tbl: location of page table
|
|
* rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
|
|
- * vstart: start address to map
|
|
- * vend: end address to map - we map [vstart, vend]
|
|
+ * vstart: virtual address of start of range
|
|
+ * vend: virtual address of end of range - we map [vstart, vend - 1]
|
|
* flags: flags to use to map last level entries
|
|
* phys: physical address corresponding to vstart - physical memory is contiguous
|
|
* pgds: the number of pgd entries
|
|
*
|
|
* Temporaries: istart, iend, tmp, count, sv - these need to be different registers
|
|
- * Preserves: vstart, vend, flags
|
|
- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
|
|
+ * Preserves: vstart, flags
|
|
+ * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
|
|
*/
|
|
.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
|
|
+ sub \vend, \vend, #1
|
|
add \rtbl, \tbl, #PAGE_SIZE
|
|
mov \sv, \rtbl
|
|
mov \count, #0
|
|
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
|
|
index f1be832e2b746..d1e93a39cd3bc 100644
|
|
--- a/arch/m68k/Kconfig.bus
|
|
+++ b/arch/m68k/Kconfig.bus
|
|
@@ -63,7 +63,7 @@ source "drivers/zorro/Kconfig"
|
|
|
|
endif
|
|
|
|
-if !MMU
|
|
+if COLDFIRE
|
|
|
|
config ISA_DMA_API
|
|
def_bool !M5272
|
|
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
|
|
index 0ddf03df62688..f451268f6c384 100644
|
|
--- a/arch/mips/mti-malta/malta-dtshim.c
|
|
+++ b/arch/mips/mti-malta/malta-dtshim.c
|
|
@@ -22,7 +22,7 @@
|
|
#define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
|
|
#define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
|
|
|
|
-static unsigned char fdt_buf[16 << 10] __initdata;
|
|
+static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
|
|
|
|
/* determined physical memory size, not overridden by command line args */
|
|
extern unsigned long physical_memsize;
|
|
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
|
|
index bc657e55c15f8..98e4f97db5159 100644
|
|
--- a/arch/openrisc/kernel/entry.S
|
|
+++ b/arch/openrisc/kernel/entry.S
|
|
@@ -547,6 +547,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
|
|
l.bnf 1f // ext irq enabled, all ok.
|
|
l.nop
|
|
|
|
+#ifdef CONFIG_PRINTK
|
|
l.addi r1,r1,-0x8
|
|
l.movhi r3,hi(42f)
|
|
l.ori r3,r3,lo(42f)
|
|
@@ -560,6 +561,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
|
|
.string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
|
|
.align 4
|
|
.previous
|
|
+#endif
|
|
|
|
l.ori r4,r4,SPR_SR_IEE // fix the bug
|
|
// l.sw PT_SR(r1),r4
|
|
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
|
|
index 9f43eaeb0b0af..8d6c9b88eb3f2 100644
|
|
--- a/arch/parisc/kernel/signal.c
|
|
+++ b/arch/parisc/kernel/signal.c
|
|
@@ -237,6 +237,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
|
|
#endif
|
|
|
|
usp = (regs->gr[30] & ~(0x01UL));
|
|
+#ifdef CONFIG_64BIT
|
|
+ if (is_compat_task()) {
|
|
+ /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
|
|
+ usp = (compat_uint_t)usp;
|
|
+ }
|
|
+#endif
|
|
/*FIXME: frame_size parameter is unused, remove it. */
|
|
frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
|
|
|
|
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
|
|
index 949ff9ccda5e7..dbf3ff8adc654 100644
|
|
--- a/arch/powerpc/configs/mpc885_ads_defconfig
|
|
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
|
|
@@ -34,6 +34,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
|
|
# CONFIG_MTD_CFI_I2 is not set
|
|
CONFIG_MTD_CFI_I4=y
|
|
CONFIG_MTD_CFI_AMDSTD=y
|
|
+CONFIG_MTD_PHYSMAP=y
|
|
CONFIG_MTD_PHYSMAP_OF=y
|
|
# CONFIG_BLK_DEV is not set
|
|
CONFIG_NETDEVICES=y
|
|
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
|
|
index c6bbe9778d3cd..3c09109e708ef 100644
|
|
--- a/arch/powerpc/include/asm/pmc.h
|
|
+++ b/arch/powerpc/include/asm/pmc.h
|
|
@@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
|
|
#endif
|
|
}
|
|
|
|
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
|
+static inline int ppc_get_pmu_inuse(void)
|
|
+{
|
|
+ return get_paca()->pmcregs_in_use;
|
|
+}
|
|
+#endif
|
|
+
|
|
extern void power4_enable_pmcs(void);
|
|
|
|
#else /* CONFIG_PPC64 */
|
|
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
|
|
index 26a028a9233af..91f274134884e 100644
|
|
--- a/arch/powerpc/kernel/smp.c
|
|
+++ b/arch/powerpc/kernel/smp.c
|
|
@@ -1385,6 +1385,7 @@ static void add_cpu_to_masks(int cpu)
|
|
* add it to it's own thread sibling mask.
|
|
*/
|
|
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
|
|
+ cpumask_set_cpu(cpu, cpu_core_mask(cpu));
|
|
|
|
for (i = first_thread; i < first_thread + threads_per_core; i++)
|
|
if (cpu_online(i))
|
|
@@ -1399,11 +1400,6 @@ static void add_cpu_to_masks(int cpu)
|
|
if (has_coregroup_support())
|
|
update_coregroup_mask(cpu, &mask);
|
|
|
|
- if (chip_id == -1 || !ret) {
|
|
- cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
|
|
- goto out;
|
|
- }
|
|
-
|
|
if (shared_caches)
|
|
submask_fn = cpu_l2_cache_mask;
|
|
|
|
@@ -1413,6 +1409,10 @@ static void add_cpu_to_masks(int cpu)
|
|
/* Skip all CPUs already part of current CPU core mask */
|
|
cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
|
|
|
|
+ /* If chip_id is -1; limit the cpu_core_mask to within DIE*/
|
|
+ if (chip_id == -1)
|
|
+ cpumask_and(mask, mask, cpu_cpu_mask(cpu));
|
|
+
|
|
for_each_cpu(i, mask) {
|
|
if (chip_id == cpu_to_chip_id(i)) {
|
|
or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
|
|
@@ -1422,7 +1422,6 @@ static void add_cpu_to_masks(int cpu)
|
|
}
|
|
}
|
|
|
|
-out:
|
|
free_cpumask_var(mask);
|
|
}
|
|
|
|
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
|
|
index 2f926ea9b7b94..d4a66ce93f522 100644
|
|
--- a/arch/powerpc/kernel/stacktrace.c
|
|
+++ b/arch/powerpc/kernel/stacktrace.c
|
|
@@ -8,6 +8,7 @@
|
|
* Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
|
|
*/
|
|
|
|
+#include <linux/delay.h>
|
|
#include <linux/export.h>
|
|
#include <linux/kallsyms.h>
|
|
#include <linux/module.h>
|
|
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
|
|
index bb35490400e99..04028f905e50e 100644
|
|
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
|
|
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
|
|
@@ -64,10 +64,12 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
|
|
}
|
|
isync();
|
|
|
|
+ pagefault_disable();
|
|
if (is_load)
|
|
- ret = copy_from_user_nofault(to, (const void __user *)from, n);
|
|
+ ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
|
|
else
|
|
- ret = copy_to_user_nofault((void __user *)to, from, n);
|
|
+ ret = __copy_to_user_inatomic((void __user *)to, from, n);
|
|
+ pagefault_enable();
|
|
|
|
/* switch the pid first to avoid running host with unallocated pid */
|
|
if (quadrant == 1 && pid != old_pid)
|
|
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
|
|
index 083a4e037718d..e5ba96c41f3fc 100644
|
|
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
|
|
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
|
|
@@ -173,10 +173,13 @@ static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
|
|
idx -= stt->offset;
|
|
page = stt->pages[idx / TCES_PER_PAGE];
|
|
/*
|
|
- * page must not be NULL in real mode,
|
|
- * kvmppc_rm_ioba_validate() must have taken care of this.
|
|
+ * kvmppc_rm_ioba_validate() allows pages not be allocated if TCE is
|
|
+ * being cleared, otherwise it returns H_TOO_HARD and we skip this.
|
|
*/
|
|
- WARN_ON_ONCE_RM(!page);
|
|
+ if (!page) {
|
|
+ WARN_ON_ONCE_RM(tce != 0);
|
|
+ return;
|
|
+ }
|
|
tbl = kvmppc_page_address(page);
|
|
|
|
tbl[idx % TCES_PER_PAGE] = tce;
|
|
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
|
|
index bd7350a608d4b..175967a195c44 100644
|
|
--- a/arch/powerpc/kvm/book3s_hv.c
|
|
+++ b/arch/powerpc/kvm/book3s_hv.c
|
|
@@ -58,6 +58,7 @@
|
|
#include <asm/kvm_book3s.h>
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/lppaca.h>
|
|
+#include <asm/pmc.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/cputhreads.h>
|
|
#include <asm/page.h>
|
|
@@ -3619,6 +3620,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
|
|
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
|
|
kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
|
|
|
|
+#ifdef CONFIG_PPC_PSERIES
|
|
+ if (kvmhv_on_pseries()) {
|
|
+ barrier();
|
|
+ if (vcpu->arch.vpa.pinned_addr) {
|
|
+ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
|
|
+ get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
|
|
+ } else {
|
|
+ get_lppaca()->pmcregs_in_use = 1;
|
|
+ }
|
|
+ barrier();
|
|
+ }
|
|
+#endif
|
|
kvmhv_load_guest_pmu(vcpu);
|
|
|
|
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
|
|
@@ -3756,6 +3769,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
|
|
save_pmu |= nesting_enabled(vcpu->kvm);
|
|
|
|
kvmhv_save_guest_pmu(vcpu, save_pmu);
|
|
+#ifdef CONFIG_PPC_PSERIES
|
|
+ if (kvmhv_on_pseries()) {
|
|
+ barrier();
|
|
+ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
|
|
+ barrier();
|
|
+ }
|
|
+#endif
|
|
|
|
vc->entry_exit_map = 0x101;
|
|
vc->in_guest = 0;
|
|
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
|
|
index f2bf98bdcea28..094a1076fd1fe 100644
|
|
--- a/arch/powerpc/mm/numa.c
|
|
+++ b/arch/powerpc/mm/numa.c
|
|
@@ -893,7 +893,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
|
|
static void __init find_possible_nodes(void)
|
|
{
|
|
struct device_node *rtas;
|
|
- const __be32 *domains;
|
|
+ const __be32 *domains = NULL;
|
|
int prop_length, max_nodes;
|
|
u32 i;
|
|
|
|
@@ -909,9 +909,14 @@ static void __init find_possible_nodes(void)
|
|
* it doesn't exist, then fallback on ibm,max-associativity-domains.
|
|
* Current denotes what the platform can support compared to max
|
|
* which denotes what the Hypervisor can support.
|
|
+ *
|
|
+ * If the LPAR is migratable, new nodes might be activated after a LPM,
|
|
+ * so we should consider the max number in that case.
|
|
*/
|
|
- domains = of_get_property(rtas, "ibm,current-associativity-domains",
|
|
- &prop_length);
|
|
+ if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
|
|
+ domains = of_get_property(rtas,
|
|
+ "ibm,current-associativity-domains",
|
|
+ &prop_length);
|
|
if (!domains) {
|
|
domains = of_get_property(rtas, "ibm,max-associativity-domains",
|
|
&prop_length);
|
|
@@ -920,6 +925,8 @@ static void __init find_possible_nodes(void)
|
|
}
|
|
|
|
max_nodes = of_read_number(&domains[min_common_depth], 1);
|
|
+ pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
|
|
+
|
|
for (i = 0; i < max_nodes; i++) {
|
|
if (!node_possible(i))
|
|
node_set(i, node_possible_map);
|
|
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
|
|
index d48413e28c39e..c756228a081fb 100644
|
|
--- a/arch/powerpc/perf/hv-gpci.c
|
|
+++ b/arch/powerpc/perf/hv-gpci.c
|
|
@@ -175,7 +175,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
|
|
*/
|
|
count = 0;
|
|
for (i = offset; i < offset + length; i++)
|
|
- count |= arg->bytes[i] << (i - offset);
|
|
+ count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
|
|
|
|
*value = count;
|
|
out:
|
|
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
|
|
index bdb242a1544eb..75a2ecec2ab8a 100644
|
|
--- a/arch/s390/include/asm/setup.h
|
|
+++ b/arch/s390/include/asm/setup.h
|
|
@@ -38,6 +38,7 @@
|
|
#define MACHINE_FLAG_NX BIT(15)
|
|
#define MACHINE_FLAG_GS BIT(16)
|
|
#define MACHINE_FLAG_SCC BIT(17)
|
|
+#define MACHINE_FLAG_PCI_MIO BIT(18)
|
|
|
|
#define LPP_MAGIC BIT(31)
|
|
#define LPP_PID_MASK _AC(0xffffffff, UL)
|
|
@@ -113,6 +114,7 @@ extern unsigned long mio_wb_bit_mask;
|
|
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
|
|
#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
|
|
#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
|
|
+#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
|
|
|
|
/*
|
|
* Console mode. Override with conmode=
|
|
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
|
|
index 705844f739345..985e1e7553336 100644
|
|
--- a/arch/s390/kernel/early.c
|
|
+++ b/arch/s390/kernel/early.c
|
|
@@ -238,6 +238,10 @@ static __init void detect_machine_facilities(void)
|
|
clock_comparator_max = -1ULL >> 1;
|
|
__ctl_set_bit(0, 53);
|
|
}
|
|
+ if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
|
|
+ S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
|
|
+ /* the control bit is set during PCI initialization */
|
|
+ }
|
|
}
|
|
|
|
static inline void save_vector_registers(void)
|
|
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
|
|
index ab584e8e35275..9156653b56f69 100644
|
|
--- a/arch/s390/kernel/jump_label.c
|
|
+++ b/arch/s390/kernel/jump_label.c
|
|
@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
|
|
unsigned char *ipe = (unsigned char *)expected;
|
|
unsigned char *ipn = (unsigned char *)new;
|
|
|
|
- pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
|
|
+ pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
|
|
pr_emerg("Found: %6ph\n", ipc);
|
|
pr_emerg("Expected: %6ph\n", ipe);
|
|
pr_emerg("New: %6ph\n", ipn);
|
|
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
|
|
index 77767850d0d07..9d5960bbc45f2 100644
|
|
--- a/arch/s390/mm/init.c
|
|
+++ b/arch/s390/mm/init.c
|
|
@@ -180,9 +180,9 @@ static void pv_init(void)
|
|
return;
|
|
|
|
/* make sure bounce buffers are shared */
|
|
+ swiotlb_force = SWIOTLB_FORCE;
|
|
swiotlb_init(1);
|
|
swiotlb_update_mem_attributes();
|
|
- swiotlb_force = SWIOTLB_FORCE;
|
|
}
|
|
|
|
void __init mem_init(void)
|
|
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
|
|
index 0ddb1fe353dc8..f5ddbc625c1a5 100644
|
|
--- a/arch/s390/pci/pci.c
|
|
+++ b/arch/s390/pci/pci.c
|
|
@@ -866,7 +866,6 @@ static void zpci_mem_exit(void)
|
|
}
|
|
|
|
static unsigned int s390_pci_probe __initdata = 1;
|
|
-static unsigned int s390_pci_no_mio __initdata;
|
|
unsigned int s390_pci_force_floating __initdata;
|
|
static unsigned int s390_pci_initialized;
|
|
|
|
@@ -877,7 +876,7 @@ char * __init pcibios_setup(char *str)
|
|
return NULL;
|
|
}
|
|
if (!strcmp(str, "nomio")) {
|
|
- s390_pci_no_mio = 1;
|
|
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
|
|
return NULL;
|
|
}
|
|
if (!strcmp(str, "force_floating")) {
|
|
@@ -906,7 +905,7 @@ static int __init pci_base_init(void)
|
|
if (!test_facility(69) || !test_facility(71))
|
|
return 0;
|
|
|
|
- if (test_facility(153) && !s390_pci_no_mio) {
|
|
+ if (MACHINE_HAS_PCI_MIO) {
|
|
static_branch_enable(&have_mio);
|
|
ctl_set_bit(2, 5);
|
|
}
|
|
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
|
|
index 6cc50ab07bded..65d11711cd7bb 100644
|
|
--- a/arch/x86/kernel/cpu/mshyperv.c
|
|
+++ b/arch/x86/kernel/cpu/mshyperv.c
|
|
@@ -322,8 +322,6 @@ static void __init ms_hyperv_init_platform(void)
|
|
if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
|
|
wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, 0x1);
|
|
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
|
|
- } else {
|
|
- mark_tsc_unstable("running on Hyper-V");
|
|
}
|
|
|
|
/*
|
|
@@ -382,6 +380,13 @@ static void __init ms_hyperv_init_platform(void)
|
|
/* Register Hyper-V specific clocksource */
|
|
hv_init_clocksource();
|
|
#endif
|
|
+ /*
|
|
+ * TSC should be marked as unstable only after Hyper-V
|
|
+ * clocksource has been initialized. This ensures that the
|
|
+ * stability of the sched_clock is not altered.
|
|
+ */
|
|
+ if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
|
|
+ mark_tsc_unstable("running on Hyper-V");
|
|
}
|
|
|
|
const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
|
|
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
|
|
index 56e0f290fef65..e809f14468464 100644
|
|
--- a/arch/x86/xen/p2m.c
|
|
+++ b/arch/x86/xen/p2m.c
|
|
@@ -618,8 +618,8 @@ int xen_alloc_p2m_entry(unsigned long pfn)
|
|
}
|
|
|
|
/* Expanded the p2m? */
|
|
- if (pfn > xen_p2m_last_pfn) {
|
|
- xen_p2m_last_pfn = pfn;
|
|
+ if (pfn >= xen_p2m_last_pfn) {
|
|
+ xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
|
|
HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
|
|
}
|
|
|
|
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
|
|
index af81a62faba64..e7faea3d73d3b 100644
|
|
--- a/arch/xtensa/platforms/iss/console.c
|
|
+++ b/arch/xtensa/platforms/iss/console.c
|
|
@@ -168,9 +168,13 @@ static const struct tty_operations serial_ops = {
|
|
|
|
int __init rs_init(void)
|
|
{
|
|
- tty_port_init(&serial_port);
|
|
+ int ret;
|
|
|
|
serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
|
|
+ if (!serial_driver)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ tty_port_init(&serial_port);
|
|
|
|
pr_info("%s %s\n", serial_name, serial_version);
|
|
|
|
@@ -190,8 +194,15 @@ int __init rs_init(void)
|
|
tty_set_operations(serial_driver, &serial_ops);
|
|
tty_port_link_device(&serial_port, serial_driver, 0);
|
|
|
|
- if (tty_register_driver(serial_driver))
|
|
- panic("Couldn't register serial driver\n");
|
|
+ ret = tty_register_driver(serial_driver);
|
|
+ if (ret) {
|
|
+ pr_err("Couldn't register serial driver\n");
|
|
+ tty_driver_kref_put(serial_driver);
|
|
+ tty_port_destroy(&serial_port);
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
|
|
index 8ea37328ca84e..b8c2ddc01aec3 100644
|
|
--- a/block/bfq-iosched.c
|
|
+++ b/block/bfq-iosched.c
|
|
@@ -5011,7 +5011,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
|
|
if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
|
|
pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
|
|
bfqq->new_ioprio);
|
|
- bfqq->new_ioprio = IOPRIO_BE_NR;
|
|
+ bfqq->new_ioprio = IOPRIO_BE_NR - 1;
|
|
}
|
|
|
|
bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
|
|
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
|
|
index ab7d7ebcf6ddc..61b452272f94e 100644
|
|
--- a/block/blk-zoned.c
|
|
+++ b/block/blk-zoned.c
|
|
@@ -296,9 +296,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
|
|
if (!blk_queue_is_zoned(q))
|
|
return -ENOTTY;
|
|
|
|
- if (!capable(CAP_SYS_ADMIN))
|
|
- return -EACCES;
|
|
-
|
|
if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
|
|
return -EFAULT;
|
|
|
|
@@ -357,9 +354,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
|
|
if (!blk_queue_is_zoned(q))
|
|
return -ENOTTY;
|
|
|
|
- if (!capable(CAP_SYS_ADMIN))
|
|
- return -EACCES;
|
|
-
|
|
if (!(mode & FMODE_WRITE))
|
|
return -EBADF;
|
|
|
|
diff --git a/block/bsg.c b/block/bsg.c
|
|
index 3d78e843a83f6..2cbc1fcc8247b 100644
|
|
--- a/block/bsg.c
|
|
+++ b/block/bsg.c
|
|
@@ -371,10 +371,13 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
case SG_GET_RESERVED_SIZE:
|
|
case SG_SET_RESERVED_SIZE:
|
|
case SG_EMULATED_HOST:
|
|
- case SCSI_IOCTL_SEND_COMMAND:
|
|
return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
|
|
case SG_IO:
|
|
return bsg_sg_io(bd->queue, file->f_mode, uarg);
|
|
+ case SCSI_IOCTL_SEND_COMMAND:
|
|
+ pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
|
|
+ current->comm);
|
|
+ return -EINVAL;
|
|
default:
|
|
return -ENOTTY;
|
|
}
|
|
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
|
|
index 44f434acfce08..0e6e73b8023fc 100644
|
|
--- a/drivers/ata/libata-core.c
|
|
+++ b/drivers/ata/libata-core.c
|
|
@@ -3950,6 +3950,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
|
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
|
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
|
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
|
+ { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
|
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
|
+ { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
|
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
|
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
|
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
|
|
|
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
|
|
index 9dcef6ac643b9..982fe91125322 100644
|
|
--- a/drivers/ata/sata_dwc_460ex.c
|
|
+++ b/drivers/ata/sata_dwc_460ex.c
|
|
@@ -1249,24 +1249,20 @@ static int sata_dwc_probe(struct platform_device *ofdev)
|
|
irq = irq_of_parse_and_map(np, 0);
|
|
if (irq == NO_IRQ) {
|
|
dev_err(&ofdev->dev, "no SATA DMA irq\n");
|
|
- err = -ENODEV;
|
|
- goto error_out;
|
|
+ return -ENODEV;
|
|
}
|
|
|
|
#ifdef CONFIG_SATA_DWC_OLD_DMA
|
|
if (!of_find_property(np, "dmas", NULL)) {
|
|
err = sata_dwc_dma_init_old(ofdev, hsdev);
|
|
if (err)
|
|
- goto error_out;
|
|
+ return err;
|
|
}
|
|
#endif
|
|
|
|
hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
|
|
- if (IS_ERR(hsdev->phy)) {
|
|
- err = PTR_ERR(hsdev->phy);
|
|
- hsdev->phy = NULL;
|
|
- goto error_out;
|
|
- }
|
|
+ if (IS_ERR(hsdev->phy))
|
|
+ return PTR_ERR(hsdev->phy);
|
|
|
|
err = phy_init(hsdev->phy);
|
|
if (err)
|
|
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
|
|
index 806766b1b45f6..e329cdd7156c9 100644
|
|
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
|
|
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
|
|
@@ -64,6 +64,8 @@ struct fsl_mc_addr_translation_range {
|
|
#define MC_FAPR_PL BIT(18)
|
|
#define MC_FAPR_BMT BIT(17)
|
|
|
|
+static phys_addr_t mc_portal_base_phys_addr;
|
|
+
|
|
/**
|
|
* fsl_mc_bus_match - device to driver matching callback
|
|
* @dev: the fsl-mc device to match against
|
|
@@ -597,14 +599,30 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
|
|
* If base address is in the region_desc use it otherwise
|
|
* revert to old mechanism
|
|
*/
|
|
- if (region_desc.base_address)
|
|
+ if (region_desc.base_address) {
|
|
regions[i].start = region_desc.base_address +
|
|
region_desc.base_offset;
|
|
- else
|
|
+ } else {
|
|
error = translate_mc_addr(mc_dev, mc_region_type,
|
|
region_desc.base_offset,
|
|
®ions[i].start);
|
|
|
|
+ /*
|
|
+ * Some versions of the MC firmware wrongly report
|
|
+ * 0 for register base address of the DPMCP associated
|
|
+ * with child DPRC objects thus rendering them unusable.
|
|
+ * This is particularly troublesome in ACPI boot
|
|
+ * scenarios where the legacy way of extracting this
|
|
+ * base address from the device tree does not apply.
|
|
+ * Given that DPMCPs share the same base address,
|
|
+ * workaround this by using the base address extracted
|
|
+ * from the root DPRC container.
|
|
+ */
|
|
+ if (is_fsl_mc_bus_dprc(mc_dev) &&
|
|
+ regions[i].start == region_desc.base_offset)
|
|
+ regions[i].start += mc_portal_base_phys_addr;
|
|
+ }
|
|
+
|
|
if (error < 0) {
|
|
dev_err(parent_dev,
|
|
"Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
|
|
@@ -996,6 +1014,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
|
|
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
mc_portal_phys_addr = plat_res->start;
|
|
mc_portal_size = resource_size(plat_res);
|
|
+ mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
+
error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
mc_portal_size, NULL,
FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index b4fc8d71daf20..b656d25a97678 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -128,6 +128,12 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
int i;
u32 div;
+ /* do not look for a rate that is outside of our range */
+ if (gck->range.max && req->rate > gck->range.max)
+ req->rate = gck->range.max;
+ if (gck->range.min && req->rate < gck->range.min)
+ req->rate = gck->range.min;
+
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (gck->chg_pid == i)
continue;
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 2c309e3dc8e34..04e728538cefe 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -216,7 +216,8 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
div->width = PCG_PREDIV_WIDTH;
divider_ops = &imx8m_clk_composite_divider_ops;
mux_ops = &clk_mux_ops;
- flags |= CLK_SET_PARENT_GATE;
+ if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
+ flags |= CLK_SET_PARENT_GATE;
}
div->lock = &imx_ccm_lock;
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 4cbf86ab2eacf..711bd2294c70b 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -458,10 +458,11 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/*
* DRAM clocks are manipulated from TF-A outside clock framework.
- * Mark with GET_RATE_NOCACHE to always read div value from hardware
+ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
+ * as div value should always be read from hardware
*/
- hws[IMX8MM_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
- hws[IMX8MM_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+ hws[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000);
+ hws[IMX8MM_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080);
/* IP */
hws[IMX8MM_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index f98f252795396..33a7ddc23cd24 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -441,10 +441,11 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
/*
* DRAM clocks are manipulated from TF-A outside clock framework.
- * Mark with GET_RATE_NOCACHE to always read div value from hardware
+ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
+ * as div value should always be read from hardware
*/
- hws[IMX8MN_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
- hws[IMX8MN_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mn_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
+ hws[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
+ hws[IMX8MN_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
hws[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_hw_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
hws[IMX8MN_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
|
|
index aac6bcc65c20c..f679e5cc320b5 100644
|
|
--- a/drivers/clk/imx/clk-imx8mq.c
|
|
+++ b/drivers/clk/imx/clk-imx8mq.c
|
|
@@ -427,11 +427,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
|
|
|
|
/*
|
|
* DRAM clocks are manipulated from TF-A outside clock framework.
|
|
- * Mark with GET_RATE_NOCACHE to always read div value from hardware
|
|
+ * The fw_managed helper sets GET_RATE_NOCACHE and clears SET_PARENT_GATE
|
|
+ * as div value should always be read from hardware
|
|
*/
|
|
hws[IMX8MQ_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
|
|
- hws[IMX8MQ_CLK_DRAM_ALT] = __imx8m_clk_hw_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000, CLK_GET_RATE_NOCACHE);
|
|
- hws[IMX8MQ_CLK_DRAM_APB] = __imx8m_clk_hw_composite("dram_apb", imx8mq_dram_apb_sels, base + 0xa080, CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE);
|
|
+ hws[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
|
|
+ hws[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
|
|
|
|
/* IP */
|
|
hws[IMX8MQ_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
|
|
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
|
|
index f04cbbab9fccd..c66e00e877114 100644
|
|
--- a/drivers/clk/imx/clk.h
|
|
+++ b/drivers/clk/imx/clk.h
|
|
@@ -533,8 +533,9 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
|
|
struct clk *div, struct clk *mux, struct clk *pll,
|
|
struct clk *step);
|
|
|
|
-#define IMX_COMPOSITE_CORE BIT(0)
|
|
-#define IMX_COMPOSITE_BUS BIT(1)
|
|
+#define IMX_COMPOSITE_CORE BIT(0)
|
|
+#define IMX_COMPOSITE_BUS BIT(1)
|
|
+#define IMX_COMPOSITE_FW_MANAGED BIT(2)
|
|
|
|
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
|
|
const char * const *parent_names,
|
|
@@ -570,6 +571,17 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
|
|
ARRAY_SIZE(parent_names), reg, 0, \
|
|
flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
|
|
|
|
+#define __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, flags) \
|
|
+ imx8m_clk_hw_composite_flags(name, parent_names, \
|
|
+ ARRAY_SIZE(parent_names), reg, IMX_COMPOSITE_FW_MANAGED, \
|
|
+ flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
|
|
+
|
|
+#define imx8m_clk_hw_fw_managed_composite(name, parent_names, reg) \
|
|
+ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, 0)
|
|
+
|
|
+#define imx8m_clk_hw_fw_managed_composite_critical(name, parent_names, reg) \
|
|
+ __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, CLK_IS_CRITICAL)
|
|
+
|
|
#define __imx8m_clk_composite(name, parent_names, reg, flags) \
|
|
to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
|
|
|
|
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
|
|
index 4c6c9167ef509..bbbf9ce428672 100644
|
|
--- a/drivers/clk/rockchip/clk-pll.c
|
|
+++ b/drivers/clk/rockchip/clk-pll.c
|
|
@@ -940,7 +940,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
|
|
switch (pll_type) {
|
|
case pll_rk3036:
|
|
case pll_rk3328:
|
|
- if (!pll->rate_table || IS_ERR(ctx->grf))
|
|
+ if (!pll->rate_table)
|
|
init.ops = &rockchip_rk3036_pll_clk_norate_ops;
|
|
else
|
|
init.ops = &rockchip_rk3036_pll_clk_ops;
|
|
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
|
|
index 438075a50b9f2..7182afb4258a7 100644
|
|
--- a/drivers/clk/socfpga/clk-agilex.c
|
|
+++ b/drivers/clk/socfpga/clk-agilex.c
|
|
@@ -107,10 +107,10 @@ static const struct clk_parent_data gpio_db_free_mux[] = {
|
|
};
|
|
|
|
static const struct clk_parent_data psi_ref_free_mux[] = {
|
|
- { .fw_name = "main_pll_c3",
|
|
- .name = "main_pll_c3", },
|
|
- { .fw_name = "peri_pll_c3",
|
|
- .name = "peri_pll_c3", },
|
|
+ { .fw_name = "main_pll_c2",
|
|
+ .name = "main_pll_c2", },
|
|
+ { .fw_name = "peri_pll_c2",
|
|
+ .name = "peri_pll_c2", },
|
|
{ .fw_name = "osc1",
|
|
.name = "osc1", },
|
|
{ .fw_name = "cb-intosc-hs-div2-clk",
|
|
@@ -193,6 +193,13 @@ static const struct clk_parent_data sdmmc_mux[] = {
|
|
.name = "boot_clk", },
|
|
};
|
|
|
|
+static const struct clk_parent_data s2f_user0_mux[] = {
|
|
+ { .fw_name = "s2f_user0_free_clk",
|
|
+ .name = "s2f_user0_free_clk", },
|
|
+ { .fw_name = "boot_clk",
|
|
+ .name = "boot_clk", },
|
|
+};
|
|
+
|
|
static const struct clk_parent_data s2f_user1_mux[] = {
|
|
{ .fw_name = "s2f_user1_free_clk",
|
|
.name = "s2f_user1_free_clk", },
|
|
@@ -260,7 +267,7 @@ static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
|
|
{ AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
|
|
ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0, 0},
|
|
{ AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
|
|
- ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
|
|
+ ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0x30, 2},
|
|
{ AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
|
|
ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, 5},
|
|
{ AGILEX_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
|
|
@@ -306,6 +313,8 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
|
|
4, 0x98, 0, 16, 0x88, 3, 0},
|
|
{ AGILEX_SDMMC_CLK, "sdmmc_clk", NULL, sdmmc_mux, ARRAY_SIZE(sdmmc_mux), 0, 0x7C,
|
|
5, 0, 0, 0, 0x88, 4, 4},
|
|
+ { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_user0_mux, ARRAY_SIZE(s2f_user0_mux), 0, 0x24,
|
|
+ 6, 0, 0, 0, 0x30, 2, 0},
|
|
{ AGILEX_S2F_USER1_CLK, "s2f_user1_clk", NULL, s2f_user1_mux, ARRAY_SIZE(s2f_user1_mux), 0, 0x7C,
|
|
6, 0, 0, 0, 0x88, 5, 0},
|
|
{ AGILEX_PSI_REF_CLK, "psi_ref_clk", NULL, psi_mux, ARRAY_SIZE(psi_mux), 0, 0x7C,
|
|
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
|
|
index e439b43c19ebe..8977e4de59157 100644
|
|
--- a/drivers/cpufreq/powernv-cpufreq.c
|
|
+++ b/drivers/cpufreq/powernv-cpufreq.c
|
|
@@ -36,6 +36,7 @@
|
|
#define MAX_PSTATE_SHIFT 32
|
|
#define LPSTATE_SHIFT 48
|
|
#define GPSTATE_SHIFT 56
|
|
+#define MAX_NR_CHIPS 32
|
|
|
|
#define MAX_RAMP_DOWN_TIME 5120
|
|
/*
|
|
@@ -1051,12 +1052,20 @@ static int init_chip_info(void)
|
|
unsigned int *chip;
|
|
unsigned int cpu, i;
|
|
unsigned int prev_chip_id = UINT_MAX;
|
|
+ cpumask_t *chip_cpu_mask;
|
|
int ret = 0;
|
|
|
|
chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
|
|
if (!chip)
|
|
return -ENOMEM;
|
|
|
|
+ /* Allocate a chip cpu mask large enough to fit mask for all chips */
|
|
+ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
|
|
+ if (!chip_cpu_mask) {
|
|
+ ret = -ENOMEM;
|
|
+ goto free_and_return;
|
|
+ }
|
|
+
|
|
for_each_possible_cpu(cpu) {
|
|
unsigned int id = cpu_to_chip_id(cpu);
|
|
|
|
@@ -1064,22 +1073,25 @@ static int init_chip_info(void)
|
|
prev_chip_id = id;
|
|
chip[nr_chips++] = id;
|
|
}
|
|
+ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
|
|
}
|
|
|
|
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
|
|
if (!chips) {
|
|
ret = -ENOMEM;
|
|
- goto free_and_return;
|
|
+ goto out_free_chip_cpu_mask;
|
|
}
|
|
|
|
for (i = 0; i < nr_chips; i++) {
|
|
chips[i].id = chip[i];
|
|
- cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
|
|
+ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
|
|
INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
|
|
for_each_cpu(cpu, &chips[i].mask)
|
|
per_cpu(chip_info, cpu) = &chips[i];
|
|
}
|
|
|
|
+out_free_chip_cpu_mask:
|
|
+ kfree(chip_cpu_mask);
|
|
free_and_return:
|
|
kfree(chip);
|
|
return ret;
|
|
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
|
|
index a2b5c6f60cf0e..ff164dec8422e 100644
|
|
--- a/drivers/cpuidle/cpuidle-pseries.c
|
|
+++ b/drivers/cpuidle/cpuidle-pseries.c
|
|
@@ -402,7 +402,7 @@ static void __init fixup_cede0_latency(void)
|
|
* pseries_idle_probe()
|
|
* Choose state table for shared versus dedicated partition
|
|
*/
|
|
-static int pseries_idle_probe(void)
|
|
+static int __init pseries_idle_probe(void)
|
|
{
|
|
|
|
if (cpuidle_disable != IDLE_NO_OVERRIDE)
|
|
@@ -419,7 +419,21 @@ static int pseries_idle_probe(void)
|
|
cpuidle_state_table = shared_states;
|
|
max_idle_state = ARRAY_SIZE(shared_states);
|
|
} else {
|
|
- fixup_cede0_latency();
|
|
+ /*
|
|
+ * Use firmware provided latency values
|
|
+ * starting with POWER10 platforms. In the
|
|
+ * case that we are running on a POWER10
|
|
+ * platform but in an earlier compat mode, we
|
|
+ * can still use the firmware provided values.
|
|
+ *
|
|
+ * However, on platforms prior to POWER10, we
|
|
+ * cannot rely on the accuracy of the firmware
|
|
+ * provided latency values. On such platforms,
|
|
+ * go with the conservative default estimate
|
|
+ * of 10us.
|
|
+ */
|
|
+ if (cpu_has_feature(CPU_FTR_ARCH_31) || pvr_version_is(PVR_POWER10))
|
|
+ fixup_cede0_latency();
|
|
cpuidle_state_table = dedicated_states;
|
|
max_idle_state = NR_DEDICATED_STATES;
|
|
}
|
|
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
|
|
index d0018794e92e8..57b57d4db500c 100644
|
|
--- a/drivers/crypto/ccp/sev-dev.c
|
|
+++ b/drivers/crypto/ccp/sev-dev.c
|
|
@@ -278,6 +278,9 @@ static int __sev_platform_shutdown_locked(int *error)
|
|
struct sev_device *sev = psp_master->sev_data;
|
|
int ret;
|
|
|
|
+ if (sev->state == SEV_STATE_UNINIT)
|
|
+ return 0;
|
|
+
|
|
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
|
|
if (ret)
|
|
return ret;
|
|
@@ -1018,6 +1021,20 @@ e_err:
|
|
return ret;
|
|
}
|
|
|
|
+static void sev_firmware_shutdown(struct sev_device *sev)
|
|
+{
|
|
+ sev_platform_shutdown(NULL);
|
|
+
|
|
+ if (sev_es_tmr) {
|
|
+ /* The TMR area was encrypted, flush it from the cache */
|
|
+ wbinvd_on_all_cpus();
|
|
+
|
|
+ free_pages((unsigned long)sev_es_tmr,
|
|
+ get_order(SEV_ES_TMR_SIZE));
|
|
+ sev_es_tmr = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
void sev_dev_destroy(struct psp_device *psp)
|
|
{
|
|
struct sev_device *sev = psp->sev_data;
|
|
@@ -1025,6 +1042,8 @@ void sev_dev_destroy(struct psp_device *psp)
|
|
if (!sev)
|
|
return;
|
|
|
|
+ sev_firmware_shutdown(sev);
|
|
+
|
|
if (sev->misc)
|
|
kref_put(&misc_dev->refcount, sev_exit);
|
|
|
|
@@ -1055,21 +1074,6 @@ void sev_pci_init(void)
|
|
if (sev_get_api_version())
|
|
goto err;
|
|
|
|
- /*
|
|
- * If platform is not in UNINIT state then firmware upgrade and/or
|
|
- * platform INIT command will fail. These command require UNINIT state.
|
|
- *
|
|
- * In a normal boot we should never run into case where the firmware
|
|
- * is not in UNINIT state on boot. But in case of kexec boot, a reboot
|
|
- * may not go through a typical shutdown sequence and may leave the
|
|
- * firmware in INIT or WORKING state.
|
|
- */
|
|
-
|
|
- if (sev->state != SEV_STATE_UNINIT) {
|
|
- sev_platform_shutdown(NULL);
|
|
- sev->state = SEV_STATE_UNINIT;
|
|
- }
|
|
-
|
|
if (sev_version_greater_or_equal(0, 15) &&
|
|
sev_update_firmware(sev->dev) == 0)
|
|
sev_get_api_version();
|
|
@@ -1114,17 +1118,10 @@ err:
|
|
|
|
void sev_pci_exit(void)
|
|
{
|
|
- if (!psp_master->sev_data)
|
|
- return;
|
|
-
|
|
- sev_platform_shutdown(NULL);
|
|
+ struct sev_device *sev = psp_master->sev_data;
|
|
|
|
- if (sev_es_tmr) {
|
|
- /* The TMR area was encrypted, flush it from the cache */
|
|
- wbinvd_on_all_cpus();
|
|
+ if (!sev)
|
|
+ return;
|
|
|
|
- free_pages((unsigned long)sev_es_tmr,
|
|
- get_order(SEV_ES_TMR_SIZE));
|
|
- sev_es_tmr = NULL;
|
|
- }
|
|
+ sev_firmware_shutdown(sev);
|
|
}
|
|
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
|
|
index 7d346d842a39e..c319e7e3917dc 100644
|
|
--- a/drivers/crypto/ccp/sp-pci.c
|
|
+++ b/drivers/crypto/ccp/sp-pci.c
|
|
@@ -241,6 +241,17 @@ e_err:
|
|
return ret;
|
|
}
|
|
|
|
+static void sp_pci_shutdown(struct pci_dev *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct sp_device *sp = dev_get_drvdata(dev);
|
|
+
|
|
+ if (!sp)
|
|
+ return;
|
|
+
|
|
+ sp_destroy(sp);
|
|
+}
|
|
+
|
|
static void sp_pci_remove(struct pci_dev *pdev)
|
|
{
|
|
struct device *dev = &pdev->dev;
|
|
@@ -370,6 +381,7 @@ static struct pci_driver sp_pci_driver = {
|
|
.id_table = sp_pci_table,
|
|
.probe = sp_pci_probe,
|
|
.remove = sp_pci_remove,
|
|
+ .shutdown = sp_pci_shutdown,
|
|
.driver.pm = &sp_pci_pm_ops,
|
|
};
|
|
|
|
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
|
|
index 7daed8b78ac83..5edc91cdb4e65 100644
|
|
--- a/drivers/crypto/mxs-dcp.c
|
|
+++ b/drivers/crypto/mxs-dcp.c
|
|
@@ -299,21 +299,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
|
|
|
|
struct scatterlist *dst = req->dst;
|
|
struct scatterlist *src = req->src;
|
|
- const int nents = sg_nents(req->src);
|
|
+ int dst_nents = sg_nents(dst);
|
|
|
|
const int out_off = DCP_BUF_SZ;
|
|
uint8_t *in_buf = sdcp->coh->aes_in_buf;
|
|
uint8_t *out_buf = sdcp->coh->aes_out_buf;
|
|
|
|
- uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
|
|
uint32_t dst_off = 0;
|
|
+ uint8_t *src_buf = NULL;
|
|
uint32_t last_out_len = 0;
|
|
|
|
uint8_t *key = sdcp->coh->aes_key;
|
|
|
|
int ret = 0;
|
|
- int split = 0;
|
|
- unsigned int i, len, clen, rem = 0, tlen = 0;
|
|
+ unsigned int i, len, clen, tlen = 0;
|
|
int init = 0;
|
|
bool limit_hit = false;
|
|
|
|
@@ -331,7 +330,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
|
|
memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
|
|
}
|
|
|
|
- for_each_sg(req->src, src, nents, i) {
|
|
+ for_each_sg(req->src, src, sg_nents(src), i) {
|
|
src_buf = sg_virt(src);
|
|
len = sg_dma_len(src);
|
|
tlen += len;
|
|
@@ -356,34 +355,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
|
|
* submit the buffer.
|
|
*/
|
|
if (actx->fill == out_off || sg_is_last(src) ||
|
|
- limit_hit) {
|
|
+ limit_hit) {
|
|
ret = mxs_dcp_run_aes(actx, req, init);
|
|
if (ret)
|
|
return ret;
|
|
init = 0;
|
|
|
|
- out_tmp = out_buf;
|
|
+ sg_pcopy_from_buffer(dst, dst_nents, out_buf,
|
|
+ actx->fill, dst_off);
|
|
+ dst_off += actx->fill;
|
|
last_out_len = actx->fill;
|
|
- while (dst && actx->fill) {
|
|
- if (!split) {
|
|
- dst_buf = sg_virt(dst);
|
|
- dst_off = 0;
|
|
- }
|
|
- rem = min(sg_dma_len(dst) - dst_off,
|
|
- actx->fill);
|
|
-
|
|
- memcpy(dst_buf + dst_off, out_tmp, rem);
|
|
- out_tmp += rem;
|
|
- dst_off += rem;
|
|
- actx->fill -= rem;
|
|
-
|
|
- if (dst_off == sg_dma_len(dst)) {
|
|
- dst = sg_next(dst);
|
|
- split = 0;
|
|
- } else {
|
|
- split = 1;
|
|
- }
|
|
- }
|
|
+ actx->fill = 0;
|
|
}
|
|
} while (len);
|
|
|
|
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
|
|
index 16b908c77db30..306f93e4b26a8 100644
|
|
--- a/drivers/dma/imx-sdma.c
|
|
+++ b/drivers/dma/imx-sdma.c
|
|
@@ -379,7 +379,6 @@ struct sdma_channel {
|
|
unsigned long watermark_level;
|
|
u32 shp_addr, per_addr;
|
|
enum dma_status status;
|
|
- bool context_loaded;
|
|
struct imx_dma_data data;
|
|
struct work_struct terminate_worker;
|
|
};
|
|
@@ -985,9 +984,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
|
|
int ret;
|
|
unsigned long flags;
|
|
|
|
- if (sdmac->context_loaded)
|
|
- return 0;
|
|
-
|
|
if (sdmac->direction == DMA_DEV_TO_MEM)
|
|
load_address = sdmac->pc_from_device;
|
|
else if (sdmac->direction == DMA_DEV_TO_DEV)
|
|
@@ -1030,8 +1026,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
|
|
|
|
spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
|
|
|
|
- sdmac->context_loaded = true;
|
|
-
|
|
return ret;
|
|
}
|
|
|
|
@@ -1070,7 +1064,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
|
|
vchan_get_all_descriptors(&sdmac->vc, &head);
|
|
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
|
|
vchan_dma_desc_free_list(&sdmac->vc, &head);
|
|
- sdmac->context_loaded = false;
|
|
}
|
|
|
|
static int sdma_terminate_all(struct dma_chan *chan)
|
|
@@ -1145,7 +1138,6 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
|
|
static int sdma_config_channel(struct dma_chan *chan)
|
|
{
|
|
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
|
- int ret;
|
|
|
|
sdma_disable_channel(chan);
|
|
|
|
@@ -1185,9 +1177,7 @@ static int sdma_config_channel(struct dma_chan *chan)
|
|
sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
|
|
}
|
|
|
|
- ret = sdma_load_context(sdmac);
|
|
-
|
|
- return ret;
|
|
+ return 0;
|
|
}
|
|
|
|
static int sdma_set_channel_priority(struct sdma_channel *sdmac,
|
|
@@ -1338,7 +1328,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
|
|
|
|
sdmac->event_id0 = 0;
|
|
sdmac->event_id1 = 0;
|
|
- sdmac->context_loaded = false;
|
|
|
|
sdma_set_channel_priority(sdmac, 0);
|
|
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
|
|
index 47cad23a6b9e2..b91d3d29b4102 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
|
|
@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
|
|
void
|
|
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
|
|
{
|
|
- u8 val;
|
|
+ u8 val = 0;
|
|
|
|
if (!amdgpu_connector->router.ddc_valid)
|
|
return;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
|
|
index ac043baac05d6..ad9863b84f1fc 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
|
|
@@ -207,7 +207,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
|
|
c++;
|
|
}
|
|
|
|
- BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
|
|
+ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
|
|
|
|
placement->num_placement = c;
|
|
placement->placement = places;
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
|
|
index 0e64c39a23722..7c3efc5f1be07 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
|
|
@@ -305,7 +305,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
|
|
return ret;
|
|
}
|
|
|
|
- __decode_table_header_from_buff(hdr, &buff[2]);
|
|
+ __decode_table_header_from_buff(hdr, buff);
|
|
|
|
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
|
|
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
|
|
index aa8ae0ca62f91..e8737fa438f06 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
|
|
@@ -120,7 +120,7 @@ static int vcn_v1_0_sw_init(void *handle)
|
|
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
|
|
adev->firmware.fw_size +=
|
|
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
|
|
- DRM_INFO("PSP loading VCN firmware\n");
|
|
+ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
|
|
}
|
|
|
|
r = amdgpu_vcn_resume(adev);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
|
|
index fc939d4f4841e..f493b5c3d382b 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
|
|
@@ -122,7 +122,7 @@ static int vcn_v2_0_sw_init(void *handle)
|
|
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
|
|
adev->firmware.fw_size +=
|
|
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
|
|
- DRM_INFO("PSP loading VCN firmware\n");
|
|
+ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
|
|
}
|
|
|
|
r = amdgpu_vcn_resume(adev);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
|
|
index 2c328362eee3c..ce64d4016f903 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
|
|
@@ -152,7 +152,7 @@ static int vcn_v2_5_sw_init(void *handle)
|
|
adev->firmware.fw_size +=
|
|
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
|
|
}
|
|
- DRM_INFO("PSP loading VCN firmware\n");
|
|
+ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
|
|
}
|
|
|
|
r = amdgpu_vcn_resume(adev);
|
|
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
|
|
index c9c888be12285..2099f6ebd8338 100644
|
|
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
|
|
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
|
|
@@ -148,7 +148,7 @@ static int vcn_v3_0_sw_init(void *handle)
|
|
adev->firmware.fw_size +=
|
|
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
|
|
}
|
|
- DRM_INFO("PSP loading VCN firmware\n");
|
|
+ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
|
|
}
|
|
|
|
r = amdgpu_vcn_resume(adev);
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
|
|
index 88813dad731fa..c021519af8106 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
|
|
@@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
|
|
uint32_t *se_mask)
|
|
{
|
|
struct kfd_cu_info cu_info;
|
|
- uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
|
|
- int i, se, sh, cu = 0;
|
|
-
|
|
+ uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
|
|
+ int i, se, sh, cu;
|
|
amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
|
|
|
|
if (cu_mask_count > cu_info.cu_active_number)
|
|
cu_mask_count = cu_info.cu_active_number;
|
|
|
|
+ /* Exceeding these bounds corrupts the stack and indicates a coding error.
|
|
+ * Returning with no CU's enabled will hang the queue, which should be
|
|
+ * attention grabbing.
|
|
+ */
|
|
+ if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
|
|
+ pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
|
|
+ return;
|
|
+ }
|
|
+ if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
|
|
+ pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
|
|
+ cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
|
|
+ return;
|
|
+ }
|
|
+ /* Count active CUs per SH.
|
|
+ *
|
|
+ * Some CUs in an SH may be disabled. HW expects disabled CUs to be
|
|
+ * represented in the high bits of each SH's enable mask (the upper and lower
|
|
+ * 16 bits of se_mask) and will take care of the actual distribution of
|
|
+ * disabled CUs within each SH automatically.
|
|
+ * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
|
|
+ *
|
|
+ * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
|
|
+ */
|
|
for (se = 0; se < cu_info.num_shader_engines; se++)
|
|
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
|
|
- cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
|
|
-
|
|
- /* Symmetrically map cu_mask to all SEs:
|
|
- * cu_mask[0] bit0 -> se_mask[0] bit0;
|
|
- * cu_mask[0] bit1 -> se_mask[1] bit0;
|
|
- * ... (if # SE is 4)
|
|
- * cu_mask[0] bit4 -> se_mask[0] bit1;
|
|
+ cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
|
|
+
|
|
+ /* Symmetrically map cu_mask to all SEs & SHs:
|
|
+ * se_mask programs up to 2 SH in the upper and lower 16 bits.
|
|
+ *
|
|
+ * Examples
|
|
+ * Assuming 1 SH/SE, 4 SEs:
|
|
+ * cu_mask[0] bit0 -> se_mask[0] bit0
|
|
+ * cu_mask[0] bit1 -> se_mask[1] bit0
|
|
+ * ...
|
|
+ * cu_mask[0] bit4 -> se_mask[0] bit1
|
|
+ * ...
|
|
+ *
|
|
+ * Assuming 2 SH/SE, 4 SEs
|
|
+ * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
|
|
+ * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
|
|
+ * ...
|
|
+ * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
|
|
+ * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
|
|
+ * ...
|
|
+ * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
|
|
* ...
|
|
+ *
|
|
+ * First ensure all CUs are disabled, then enable user specified CUs.
|
|
*/
|
|
- se = 0;
|
|
- for (i = 0; i < cu_mask_count; i++) {
|
|
- if (cu_mask[i / 32] & (1 << (i % 32)))
|
|
- se_mask[se] |= 1 << cu;
|
|
-
|
|
- do {
|
|
- se++;
|
|
- if (se == cu_info.num_shader_engines) {
|
|
- se = 0;
|
|
- cu++;
|
|
+ for (i = 0; i < cu_info.num_shader_engines; i++)
|
|
+ se_mask[i] = 0;
|
|
+
|
|
+ i = 0;
|
|
+ for (cu = 0; cu < 16; cu++) {
|
|
+ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
|
|
+ for (se = 0; se < cu_info.num_shader_engines; se++) {
|
|
+ if (cu_per_sh[se][sh] > cu) {
|
|
+ if (cu_mask[i / 32] & (1 << (i % 32)))
|
|
+ se_mask[se] |= 1 << (cu + sh * 16);
|
|
+ i++;
|
|
+ if (i == cu_mask_count)
|
|
+ return;
|
|
+ }
|
|
}
|
|
- } while (cu >= cu_per_se[se] && cu < 32);
|
|
+ }
|
|
}
|
|
}
|
|
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
|
|
index fbdb16418847c..4edc012e31387 100644
|
|
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
|
|
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
|
|
@@ -27,6 +27,7 @@
|
|
#include "kfd_priv.h"
|
|
|
|
#define KFD_MAX_NUM_SE 8
|
|
+#define KFD_MAX_NUM_SH_PER_SE 2
|
|
|
|
/**
|
|
* struct mqd_manager
|
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
index e02a55fc1382f..fbb65c95464b3 100644
|
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
|
@@ -197,29 +197,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
|
|
|
|
rd_buf_ptr = rd_buf;
|
|
|
|
- str_len = strlen("Current: %d %d %d ");
|
|
- snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
|
|
+ str_len = strlen("Current: %d 0x%x %d ");
|
|
+ snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
|
|
link->cur_link_settings.lane_count,
|
|
link->cur_link_settings.link_rate,
|
|
link->cur_link_settings.link_spread);
|
|
rd_buf_ptr += str_len;
|
|
|
|
- str_len = strlen("Verified: %d %d %d ");
|
|
- snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
|
|
+ str_len = strlen("Verified: %d 0x%x %d ");
|
|
+ snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
|
|
link->verified_link_cap.lane_count,
|
|
link->verified_link_cap.link_rate,
|
|
link->verified_link_cap.link_spread);
|
|
rd_buf_ptr += str_len;
|
|
|
|
- str_len = strlen("Reported: %d %d %d ");
|
|
- snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
|
|
+ str_len = strlen("Reported: %d 0x%x %d ");
|
|
+ snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
|
|
link->reported_link_cap.lane_count,
|
|
link->reported_link_cap.link_rate,
|
|
link->reported_link_cap.link_spread);
|
|
rd_buf_ptr += str_len;
|
|
|
|
- str_len = strlen("Preferred: %d %d %d ");
|
|
- snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
|
|
+ str_len = strlen("Preferred: %d 0x%x %d ");
|
|
+ snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
|
|
link->preferred_link_setting.lane_count,
|
|
link->preferred_link_setting.link_rate,
|
|
link->preferred_link_setting.link_spread);
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
index 0d1e7b56fb395..532f6a1145b55 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
|
|
@@ -3740,13 +3740,12 @@ enum dc_status dcn10_set_clock(struct dc *dc,
|
|
struct dc_clock_config clock_cfg = {0};
|
|
struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
|
|
|
|
- if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
|
|
- dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
|
|
- context, clock_type, &clock_cfg);
|
|
-
|
|
- if (!dc->clk_mgr->funcs->get_clock)
|
|
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
|
|
return DC_FAIL_UNSUPPORTED_1;
|
|
|
|
+ dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
|
|
+ context, clock_type, &clock_cfg);
|
|
+
|
|
if (clk_khz > clock_cfg.max_clock_khz)
|
|
return DC_FAIL_CLK_EXCEED_MAX;
|
|
|
|
@@ -3764,7 +3763,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,
|
|
else
|
|
return DC_ERROR_UNEXPECTED;
|
|
|
|
- if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
|
|
+ if (dc->clk_mgr->funcs->update_clocks)
|
|
dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
|
|
context, true);
|
|
return DC_OK;
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
|
|
index 9d3ccdd355825..79a2b9c785f05 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
|
|
@@ -1704,13 +1704,15 @@ void dcn20_program_front_end_for_ctx(
|
|
dcn20_program_pipe(dc, pipe, context);
|
|
pipe = pipe->bottom_pipe;
|
|
}
|
|
- /* Program secondary blending tree and writeback pipes */
|
|
- pipe = &context->res_ctx.pipe_ctx[i];
|
|
- if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
|
|
- && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
|
|
- && hws->funcs.program_all_writeback_pipes_in_tree)
|
|
- hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
|
|
}
|
|
+ /* Program secondary blending tree and writeback pipes */
|
|
+ pipe = &context->res_ctx.pipe_ctx[i];
|
|
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
|
|
+ && pipe->stream && pipe->stream->num_wb_info > 0
|
|
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
|
|
+ || pipe->stream->update_flags.raw)
|
|
+ && hws->funcs.program_all_writeback_pipes_in_tree)
|
|
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
index cfe85ba1018e8..5dbc290bcbe86 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
|
@@ -2455,7 +2455,7 @@ void dcn20_set_mcif_arb_params(
|
|
wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
|
wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
|
}
|
|
- wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
|
|
+ wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
|
|
wb_arb_params->slice_lines = 32;
|
|
wb_arb_params->arbitration_slice = 2;
|
|
wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
|
|
index 8593145379d99..6d621f07be489 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
|
|
@@ -49,6 +49,11 @@
|
|
static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
|
|
struct dcn3_xfer_func_reg *reg)
|
|
{
|
|
+ reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
|
|
+ reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;
|
|
+ reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
|
|
+ reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
|
|
+
|
|
reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
|
|
reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
|
|
reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
|
|
@@ -66,8 +71,6 @@ static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
|
|
reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
|
|
reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
|
|
reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
|
|
- reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
|
|
- reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
|
|
reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
|
|
reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
|
|
reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
|
|
@@ -147,18 +150,19 @@ static enum dc_lut_mode dwb3_get_ogam_current(
|
|
uint32_t state_mode;
|
|
uint32_t ram_select;
|
|
|
|
- REG_GET(DWB_OGAM_CONTROL,
|
|
- DWB_OGAM_MODE, &state_mode);
|
|
- REG_GET(DWB_OGAM_CONTROL,
|
|
- DWB_OGAM_SELECT, &ram_select);
|
|
+ REG_GET_2(DWB_OGAM_CONTROL,
|
|
+ DWB_OGAM_MODE_CURRENT, &state_mode,
|
|
+ DWB_OGAM_SELECT_CURRENT, &ram_select);
|
|
|
|
if (state_mode == 0) {
|
|
mode = LUT_BYPASS;
|
|
} else if (state_mode == 2) {
|
|
if (ram_select == 0)
|
|
mode = LUT_RAM_A;
|
|
- else
|
|
+ else if (ram_select == 1)
|
|
mode = LUT_RAM_B;
|
|
+ else
|
|
+ mode = LUT_BYPASS;
|
|
} else {
|
|
// Reserved value
|
|
mode = LUT_BYPASS;
|
|
@@ -172,10 +176,10 @@ static void dwb3_configure_ogam_lut(
|
|
struct dcn30_dwbc *dwbc30,
|
|
bool is_ram_a)
|
|
{
|
|
- REG_UPDATE(DWB_OGAM_LUT_CONTROL,
|
|
- DWB_OGAM_LUT_READ_COLOR_SEL, 7);
|
|
- REG_UPDATE(DWB_OGAM_CONTROL,
|
|
- DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1);
|
|
+ REG_UPDATE_2(DWB_OGAM_LUT_CONTROL,
|
|
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 7,
|
|
+ DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 0 : 1);
|
|
+
|
|
REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
|
|
}
|
|
|
|
@@ -185,17 +189,45 @@ static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
|
|
{
|
|
uint32_t i;
|
|
|
|
- // triple base implementation
|
|
- for (i = 0; i < num/2; i++) {
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
|
|
- REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
|
|
+ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
|
|
+ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
|
|
+ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
|
|
+
|
|
+ if (is_rgb_equal(rgb, num)) {
|
|
+ for (i = 0 ; i < num; i++)
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
|
|
+
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
|
|
+
|
|
+ } else {
|
|
+
|
|
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
|
|
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 4);
|
|
+
|
|
+ for (i = 0 ; i < num; i++)
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg);
|
|
+
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red);
|
|
+
|
|
+ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
|
|
+
|
|
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
|
|
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 2);
|
|
+
|
|
+ for (i = 0 ; i < num; i++)
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg);
|
|
+
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green);
|
|
+
|
|
+ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
|
|
+
|
|
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
|
|
+ DWB_OGAM_LUT_WRITE_COLOR_MASK, 1);
|
|
+
|
|
+ for (i = 0 ; i < num; i++)
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg);
|
|
+
|
|
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);
|
|
}
|
|
}
|
|
|
|
@@ -211,6 +243,8 @@ static bool dwb3_program_ogam_lut(
|
|
return false;
|
|
}
|
|
|
|
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
|
|
+
|
|
current_mode = dwb3_get_ogam_current(dwbc30);
|
|
if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
|
|
next_mode = LUT_RAM_B;
|
|
@@ -227,8 +261,7 @@ static bool dwb3_program_ogam_lut(
|
|
dwb3_program_ogam_pwl(
|
|
dwbc30, params->rgb_resulted, params->hw_points_num);
|
|
|
|
- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
|
|
- REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
|
|
+ REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
|
|
|
|
return true;
|
|
}
|
|
@@ -271,14 +304,19 @@ static void dwb3_program_gamut_remap(
|
|
|
|
struct color_matrices_reg gam_regs;
|
|
|
|
- REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
|
|
-
|
|
if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
|
|
REG_SET(DWB_GAMUT_REMAP_MODE, 0,
|
|
DWB_GAMUT_REMAP_MODE, 0);
|
|
return;
|
|
}
|
|
|
|
+ REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
|
|
+
|
|
+ gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11;
|
|
+ gam_regs.masks.csc_c11 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11;
|
|
+ gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12;
|
|
+ gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12;
|
|
+
|
|
switch (select) {
|
|
case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
|
|
gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
|
|
index 97909d5aab344..22c77e96f6a54 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
|
|
@@ -396,12 +396,22 @@ void dcn30_program_all_writeback_pipes_in_tree(
|
|
for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
|
|
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
|
|
|
|
+ if (!pipe_ctx->plane_state)
|
|
+ continue;
|
|
+
|
|
if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
|
|
wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
|
|
break;
|
|
}
|
|
}
|
|
- ASSERT(wb_info.mpcc_inst != -1);
|
|
+
|
|
+ if (wb_info.mpcc_inst == -1) {
|
|
+ /* Disable writeback pipe and disconnect from MPCC
|
|
+ * if source plane has been removed
|
|
+ */
|
|
+ dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
|
|
+ continue;
|
|
+ }
|
|
|
|
ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
|
|
dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
|
|
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
|
|
index e5f4f93317cf3..32993ce24a585 100644
|
|
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
|
|
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
|
|
@@ -2455,16 +2455,37 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
|
|
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
|
|
|
|
if (bw_params->clk_table.entries[0].memclk_mhz) {
|
|
+ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
|
|
+
|
|
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
|
|
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
|
|
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
|
|
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
|
|
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
|
|
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
|
|
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
|
|
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
|
|
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
|
|
+ }
|
|
+
|
|
+ if (!max_dcfclk_mhz)
|
|
+ max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
|
|
+ if (!max_dispclk_mhz)
|
|
+ max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
|
|
+ if (!max_dppclk_mhz)
|
|
+ max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
|
|
+ if (!max_phyclk_mhz)
|
|
+ max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;
|
|
|
|
- if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
|
|
+ if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
|
|
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
|
|
- dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
|
|
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
|
|
num_dcfclk_sta_targets++;
|
|
- } else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
|
|
+ } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
|
|
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
|
|
for (i = 0; i < num_dcfclk_sta_targets; i++) {
|
|
- if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
|
|
- dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
|
|
+ if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
|
|
+ dcfclk_sta_targets[i] = max_dcfclk_mhz;
|
|
break;
|
|
}
|
|
}
|
|
@@ -2502,7 +2523,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
|
|
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
|
|
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
|
|
} else {
|
|
- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
|
|
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
|
|
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
|
|
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
|
|
} else {
|
|
@@ -2517,11 +2538,12 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
|
|
}
|
|
|
|
while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
|
|
- optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
|
|
+ optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
|
|
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
|
|
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
|
|
}
|
|
|
|
+ dcn3_0_soc.num_states = num_states;
|
|
for (i = 0; i < dcn3_0_soc.num_states; i++) {
|
|
dcn3_0_soc.clock_limits[i].state = i;
|
|
dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
|
|
@@ -2529,9 +2551,9 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
|
|
dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
|
|
|
|
/* Fill all states with max values of all other clocks */
|
|
- dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
|
|
- dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
|
|
- dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
|
|
+ dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
|
|
+ dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
|
|
+ dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
|
|
dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
|
|
/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
|
|
/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
|
|
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
|
|
index c65ca860712d2..6cac2e58cd15f 100644
|
|
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
|
|
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
|
|
@@ -196,7 +196,7 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
|
|
u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
|
|
|
|
return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
|
|
- dsi->lanes * 8 * NSEC_PER_SEC);
|
|
+ dsi->lanes * 8ULL * NSEC_PER_SEC);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
|
|
index 232abbba36868..c7adbeaf10b1b 100644
|
|
--- a/drivers/gpu/drm/drm_auth.c
|
|
+++ b/drivers/gpu/drm/drm_auth.c
|
|
@@ -135,16 +135,18 @@ static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
|
|
static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
|
|
{
|
|
struct drm_master *old_master;
|
|
+ struct drm_master *new_master;
|
|
|
|
lockdep_assert_held_once(&dev->master_mutex);
|
|
|
|
WARN_ON(fpriv->is_master);
|
|
old_master = fpriv->master;
|
|
- fpriv->master = drm_master_create(dev);
|
|
- if (!fpriv->master) {
|
|
- fpriv->master = old_master;
|
|
+ new_master = drm_master_create(dev);
|
|
+ if (!new_master)
|
|
return -ENOMEM;
|
|
- }
|
|
+ spin_lock(&fpriv->master_lookup_lock);
|
|
+ fpriv->master = new_master;
|
|
+ spin_unlock(&fpriv->master_lookup_lock);
|
|
|
|
fpriv->is_master = 1;
|
|
fpriv->authenticated = 1;
|
|
@@ -302,10 +304,13 @@ int drm_master_open(struct drm_file *file_priv)
|
|
/* if there is no current master make this fd it, but do not create
|
|
* any master object for render clients */
|
|
mutex_lock(&dev->master_mutex);
|
|
- if (!dev->master)
|
|
+ if (!dev->master) {
|
|
ret = drm_new_set_master(dev, file_priv);
|
|
- else
|
|
+ } else {
|
|
+ spin_lock(&file_priv->master_lookup_lock);
|
|
file_priv->master = drm_master_get(dev->master);
|
|
+ spin_unlock(&file_priv->master_lookup_lock);
|
|
+ }
|
|
mutex_unlock(&dev->master_mutex);
|
|
|
|
return ret;
|
|
@@ -371,6 +376,31 @@ struct drm_master *drm_master_get(struct drm_master *master)
|
|
}
|
|
EXPORT_SYMBOL(drm_master_get);
|
|
|
|
+/**
|
|
+ * drm_file_get_master - reference &drm_file.master of @file_priv
|
|
+ * @file_priv: DRM file private
|
|
+ *
|
|
+ * Increments the reference count of @file_priv's &drm_file.master and returns
|
|
+ * the &drm_file.master. If @file_priv has no &drm_file.master, returns NULL.
|
|
+ *
|
|
+ * Master pointers returned from this function should be unreferenced using
|
|
+ * drm_master_put().
|
|
+ */
|
|
+struct drm_master *drm_file_get_master(struct drm_file *file_priv)
|
|
+{
|
|
+ struct drm_master *master = NULL;
|
|
+
|
|
+ spin_lock(&file_priv->master_lookup_lock);
|
|
+ if (!file_priv->master)
|
|
+ goto unlock;
|
|
+ master = drm_master_get(file_priv->master);
|
|
+
|
|
+unlock:
|
|
+ spin_unlock(&file_priv->master_lookup_lock);
|
|
+ return master;
|
|
+}
|
|
+EXPORT_SYMBOL(drm_file_get_master);
|
|
+
|
|
static void drm_master_destroy(struct kref *kref)
|
|
{
|
|
struct drm_master *master = container_of(kref, struct drm_master, refcount);
|
|
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
|
|
index 3d7182001004d..b0a8264894885 100644
|
|
--- a/drivers/gpu/drm/drm_debugfs.c
|
|
+++ b/drivers/gpu/drm/drm_debugfs.c
|
|
@@ -91,6 +91,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
|
|
mutex_lock(&dev->filelist_mutex);
|
|
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
|
|
struct task_struct *task;
|
|
+ bool is_current_master = drm_is_current_master(priv);
|
|
|
|
rcu_read_lock(); /* locks pid_task()->comm */
|
|
task = pid_task(priv->pid, PIDTYPE_PID);
|
|
@@ -99,7 +100,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
|
|
task ? task->comm : "<unknown>",
|
|
pid_vnr(priv->pid),
|
|
priv->minor->index,
|
|
- drm_is_current_master(priv) ? 'y' : 'n',
|
|
+ is_current_master ? 'y' : 'n',
|
|
priv->authenticated ? 'y' : 'n',
|
|
from_kuid_munged(seq_user_ns(m), uid),
|
|
priv->magic);
|
|
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
|
|
index 861f16dfd1a3d..1f54e9470165a 100644
|
|
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
|
|
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
|
|
@@ -2869,11 +2869,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
|
|
idx += tosend + 1;
|
|
|
|
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
|
|
- if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
|
|
- struct drm_printer p = drm_debug_printer(DBG_PREFIX);
|
|
+ if (ret) {
|
|
+ if (drm_debug_enabled(DRM_UT_DP)) {
|
|
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
|
|
|
|
- drm_printf(&p, "sideband msg failed to send\n");
|
|
- drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
|
|
+ drm_printf(&p, "sideband msg failed to send\n");
|
|
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
|
|
+ }
|
|
return ret;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
|
|
index 0ac4566ae3f45..537e7de8e9c33 100644
|
|
--- a/drivers/gpu/drm/drm_file.c
|
|
+++ b/drivers/gpu/drm/drm_file.c
|
|
@@ -177,6 +177,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
|
|
init_waitqueue_head(&file->event_wait);
|
|
file->event_space = 4096; /* set aside 4k for event buffer */
|
|
|
|
+ spin_lock_init(&file->master_lookup_lock);
|
|
mutex_init(&file->event_read_lock);
|
|
|
|
if (drm_core_check_feature(dev, DRIVER_GEM))
|
|
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
|
|
index da4f085fc09e7..aef22634005ef 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -107,10 +107,19 @@ static bool _drm_has_leased(struct drm_master *master, int id)
  */
 bool _drm_lease_held(struct drm_file *file_priv, int id)
 {
-	if (!file_priv || !file_priv->master)
+	bool ret;
+	struct drm_master *master;
+
+	if (!file_priv)
 		return true;
 
-	return _drm_lease_held_master(file_priv->master, id);
+	master = drm_file_get_master(file_priv);
+	if (!master)
+		return true;
+	ret = _drm_lease_held_master(master, id);
+	drm_master_put(&master);
+
+	return ret;
 }
 
 /**
@@ -129,13 +138,22 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
 	struct drm_master *master;
 	bool ret;
 
-	if (!file_priv || !file_priv->master || !file_priv->master->lessor)
+	if (!file_priv)
 		return true;
 
-	master = file_priv->master;
+	master = drm_file_get_master(file_priv);
+	if (!master)
+		return true;
+	if (!master->lessor) {
+		ret = true;
+		goto out;
+	}
 	mutex_lock(&master->dev->mode_config.idr_mutex);
 	ret = _drm_lease_held_master(master, id);
 	mutex_unlock(&master->dev->mode_config.idr_mutex);
+
+out:
+	drm_master_put(&master);
 	return ret;
 }
 
@@ -155,10 +173,16 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
 	int count_in, count_out;
 	uint32_t crtcs_out = 0;
 
-	if (!file_priv || !file_priv->master || !file_priv->master->lessor)
+	if (!file_priv)
 		return crtcs_in;
 
-	master = file_priv->master;
+	master = drm_file_get_master(file_priv);
+	if (!master)
+		return crtcs_in;
+	if (!master->lessor) {
+		crtcs_out = crtcs_in;
+		goto out;
+	}
 	dev = master->dev;
 
 	count_in = count_out = 0;
@@ -177,6 +201,9 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
 			count_in++;
 	}
 	mutex_unlock(&master->dev->mode_config.idr_mutex);
+
+out:
+	drm_master_put(&master);
 	return crtcs_out;
 }
 
@@ -490,7 +517,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	size_t object_count;
 	int ret = 0;
 	struct idr leases;
-	struct drm_master *lessor = lessor_priv->master;
+	struct drm_master *lessor;
 	struct drm_master *lessee = NULL;
 	struct file *lessee_file = NULL;
 	struct file *lessor_file = lessor_priv->filp;
@@ -502,12 +529,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EOPNOTSUPP;
 
-	/* Do not allow sub-leases */
-	if (lessor->lessor) {
-		DRM_DEBUG_LEASE("recursive leasing not allowed\n");
-		return -EINVAL;
-	}
-
 	/* need some objects */
 	if (cl->object_count == 0) {
 		DRM_DEBUG_LEASE("no objects in lease\n");
@@ -519,12 +540,22 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
+	lessor = drm_file_get_master(lessor_priv);
+	/* Do not allow sub-leases */
+	if (lessor->lessor) {
+		DRM_DEBUG_LEASE("recursive leasing not allowed\n");
+		ret = -EINVAL;
+		goto out_lessor;
+	}
+
 	object_count = cl->object_count;
 
 	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
			array_size(object_count, sizeof(__u32)));
-	if (IS_ERR(object_ids))
-		return PTR_ERR(object_ids);
+	if (IS_ERR(object_ids)) {
+		ret = PTR_ERR(object_ids);
+		goto out_lessor;
+	}
 
 	idr_init(&leases);
 
@@ -535,14 +566,15 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	if (ret) {
 		DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
 		idr_destroy(&leases);
-		return ret;
+		goto out_lessor;
 	}
 
 	/* Allocate a file descriptor for the lease */
 	fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
 	if (fd < 0) {
 		idr_destroy(&leases);
-		return fd;
+		ret = fd;
+		goto out_lessor;
 	}
 
 	DRM_DEBUG_LEASE("Creating lease\n");
@@ -578,6 +610,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	/* Hook up the fd */
 	fd_install(fd, lessee_file);
 
+	drm_master_put(&lessor);
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	return 0;
 
@@ -587,6 +620,8 @@ out_lessee:
 out_leases:
 	put_unused_fd(fd);
 
+out_lessor:
+	drm_master_put(&lessor);
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
 	return ret;
 }
@@ -609,7 +644,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
 	struct drm_mode_list_lessees *arg = data;
 	__u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr);
 	__u32 count_lessees = arg->count_lessees;
-	struct drm_master *lessor = lessor_priv->master, *lessee;
+	struct drm_master *lessor, *lessee;
 	int count;
 	int ret = 0;
 
@@ -620,6 +655,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EOPNOTSUPP;
 
+	lessor = drm_file_get_master(lessor_priv);
 	DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
 
 	mutex_lock(&dev->mode_config.idr_mutex);
@@ -643,6 +679,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
 		arg->count_lessees = count;
 
 	mutex_unlock(&dev->mode_config.idr_mutex);
+	drm_master_put(&lessor);
 
 	return ret;
 }
@@ -662,7 +699,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
 	struct drm_mode_get_lease *arg = data;
 	__u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr);
 	__u32 count_objects = arg->count_objects;
-	struct drm_master *lessee = lessee_priv->master;
+	struct drm_master *lessee;
 	struct idr *object_idr;
 	int count;
 	void *entry;
@@ -676,6 +713,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EOPNOTSUPP;
 
+	lessee = drm_file_get_master(lessee_priv);
 	DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
 
 	mutex_lock(&dev->mode_config.idr_mutex);
@@ -703,6 +741,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
 	arg->count_objects = count;
 
 	mutex_unlock(&dev->mode_config.idr_mutex);
+	drm_master_put(&lessee);
 
 	return ret;
 }
@@ -721,7 +760,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
 				void *data, struct drm_file *lessor_priv)
 {
 	struct drm_mode_revoke_lease *arg = data;
-	struct drm_master *lessor = lessor_priv->master;
+	struct drm_master *lessor;
 	struct drm_master *lessee;
 	int ret = 0;
 
@@ -731,6 +770,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EOPNOTSUPP;
 
+	lessor = drm_file_get_master(lessor_priv);
 	mutex_lock(&dev->mode_config.idr_mutex);
 
 	lessee = _drm_find_lessee(lessor, arg->lessee_id);
@@ -751,6 +791,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
 
 fail:
 	mutex_unlock(&dev->mode_config.idr_mutex);
+	drm_master_put(&lessor);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 0644936afee26..bf33c3084cb41 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -115,6 +115,8 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
 			EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
 	else if (IS_ENABLED(CONFIG_IOMMU_DMA))
 		mapping = iommu_get_domain_for_dev(priv->dma_dev);
+	else
+		mapping = ERR_PTR(-ENODEV);
 
 	if (IS_ERR(mapping))
 		return PTR_ERR(mapping);
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 749a075fe9e4c..d1b51c133e27a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -43,6 +43,22 @@
 #define ATTR_INDEX 0x1fc0
 #define ATTR_DATA 0x1fc1
 
+#define WREG_MISC(v) \
+	WREG8(MGA_MISC_OUT, v)
+
+#define RREG_MISC(v) \
+	((v) = RREG8(MGA_MISC_IN))
+
+#define WREG_MISC_MASKED(v, mask) \
+	do { \
+		u8 misc_; \
+		u8 mask_ = (mask); \
+		RREG_MISC(misc_); \
+		misc_ &= ~mask_; \
+		misc_ |= ((v) & mask_); \
+		WREG_MISC(misc_); \
+	} while (0)
+
 #define WREG_ATTR(reg, v) \
 	do { \
 		RREG8(0x1fda); \
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 38672f9e5c4f3..509968c0d16bc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -172,6 +172,8 @@ static int mgag200_g200_set_plls(struct mga_device *mdev, long clock)
 	drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
 		    clock, f_vco, m, n, p, s);
 
+	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
 	WREG_DAC(MGA1064_PIX_PLLC_M, m);
 	WREG_DAC(MGA1064_PIX_PLLC_N, n);
 	WREG_DAC(MGA1064_PIX_PLLC_P, (p | (s << 3)));
@@ -287,6 +289,8 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 		return 1;
 	}
 
+	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
 	WREG_DAC(MGA1064_PIX_PLLC_M, m);
 	WREG_DAC(MGA1064_PIX_PLLC_N, n);
 	WREG_DAC(MGA1064_PIX_PLLC_P, p);
@@ -383,6 +387,8 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 		}
 	}
 
+	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
 	for (i = 0; i <= 32 && pll_locked == false; i++) {
 		if (i > 0) {
 			WREG8(MGAREG_CRTC_INDEX, 0x1e);
@@ -520,6 +526,8 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 		}
 	}
 
+	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
@@ -652,6 +660,9 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 			}
 		}
 	}
+
+	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
 	for (i = 0; i <= 32 && pll_locked == false; i++) {
 		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 		tmp = RREG8(DAC_DATA);
@@ -752,6 +763,8 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 		}
 	}
 
+	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
@@ -785,8 +798,6 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 
 static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
 {
-	u8 misc;
-
 	switch(mdev->type) {
 	case G200_PCI:
 	case G200_AGP:
@@ -811,11 +822,6 @@ static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
 		break;
 	}
 
-	misc = RREG8(MGA_MISC_IN);
-	misc &= ~MGAREG_MISC_CLK_SEL_MASK;
-	misc |= MGAREG_MISC_CLK_SEL_MGA_MSK;
-	WREG8(MGA_MISC_OUT, misc);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index 977be0565c061..60e705283fe84 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -222,11 +222,10 @@
 
 #define MGAREG_MISC_IOADSEL (0x1 << 0)
 #define MGAREG_MISC_RAMMAPEN (0x1 << 1)
-#define MGAREG_MISC_CLK_SEL_MASK GENMASK(3, 2)
-#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
-#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
-#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
-#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
+#define MGAREG_MISC_CLKSEL_MASK GENMASK(3, 2)
+#define MGAREG_MISC_CLKSEL_VGA25 (0x0 << 2)
+#define MGAREG_MISC_CLKSEL_VGA28 (0x1 << 2)
+#define MGAREG_MISC_CLKSEL_MGA (0x3 << 2)
 #define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
 #define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
 #define MGAREG_MISC_HSYNCPOL BIT(6)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index c1c152e39918b..913de5938782a 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -89,13 +89,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
 
 static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-	int i;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-
-	/* see 119ecb7fd */
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
-		drm_crtc_vblank_get(crtc);
 }
 
 static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
@@ -114,12 +107,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
 
 static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	struct drm_crtc *crtc;
-
-	/* see 119ecb7fd */
-	for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
-		drm_crtc_vblank_put(crtc);
 }
 
 static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -410,6 +397,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev->dev);
 	struct mdp4_platform_config *config = mdp4_get_config(pdev);
+	struct msm_drm_private *priv = dev->dev_private;
 	struct mdp4_kms *mdp4_kms;
 	struct msm_kms *kms = NULL;
 	struct msm_gem_address_space *aspace;
@@ -425,7 +413,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 
 	mdp_kms_init(&mdp4_kms->base, &kms_funcs);
 
-	kms = &mdp4_kms->base.base;
+	priv->kms = &mdp4_kms->base.base;
+	kms = priv->kms;
 
 	mdp4_kms->dev = dev;
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 18cec4fc5e0ba..2768d1d306f00 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -261,7 +261,7 @@ static u8 dp_panel_get_edid_checksum(struct edid *edid)
 {
 	struct edid *last_block;
 	u8 *raw_edid;
-	bool is_edid_corrupt;
+	bool is_edid_corrupt = false;
 
 	if (!edid) {
 		DRM_ERROR("invalid edid input\n");
@@ -293,7 +293,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
 	if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
-		u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+		u8 checksum;
+
+		if (dp_panel->edid)
+			checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+		else
+			checksum = dp_panel->connector->real_edid_checksum;
 
 		dp_link_send_edid_checksum(panel->link, checksum);
 		dp_link_send_test_response(panel->link);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index b2ff68a15791a..d255bea87ca41 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -158,7 +158,6 @@ static const struct msm_dsi_config sdm660_dsi_cfg = {
 	.reg_cfg = {
 		.num = 2,
 		.regs = {
-			{"vdd", 73400, 32 }, /* 0.9 V */
 			{"vdda", 12560, 4 }, /* 1.2 V */
 		},
 	},
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 519400501bcdf..1ca9e73c6e078 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -168,7 +168,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
 	.reg_cfg = {
 		.num = 1,
 		.regs = {
-			{"vcca", 17000, 32},
+			{"vcca", 73400, 32},
 		},
 	},
 	.ops = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 597cf1459b0a8..4c6bdea5537b9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -120,8 +120,12 @@ struct panfrost_device {
 };
 
 struct panfrost_mmu {
+	struct panfrost_device *pfdev;
+	struct kref refcount;
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
+	struct drm_mm mm;
+	spinlock_t mm_lock;
 	int as;
 	atomic_t as_count;
 	struct list_head list;
@@ -132,9 +136,7 @@ struct panfrost_file_priv {
 
 	struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
 
-	struct panfrost_mmu mmu;
-	struct drm_mm mm;
-	spinlock_t mm_lock;
+	struct panfrost_mmu *mmu;
 };
 
 static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 689be734ed200..a70261809cdd2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -417,7 +417,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 	 * anyway, so let's not bother.
 	 */
 	if (!list_is_singular(&bo->mappings.list) ||
-	    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+	    WARN_ON_ONCE(first->mmu != priv->mmu)) {
 		ret = -EINVAL;
 		goto out_unlock_mappings;
 	}
@@ -449,32 +449,6 @@ int panfrost_unstable_ioctl_check(void)
 	return 0;
 }
 
-#define PFN_4G (SZ_4G >> PAGE_SHIFT)
-#define PFN_4G_MASK (PFN_4G - 1)
-#define PFN_16M (SZ_16M >> PAGE_SHIFT)
-
-static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
-					 unsigned long color,
-					 u64 *start, u64 *end)
-{
-	/* Executable buffers can't start or end on a 4GB boundary */
-	if (!(color & PANFROST_BO_NOEXEC)) {
-		u64 next_seg;
-
-		if ((*start & PFN_4G_MASK) == 0)
-			(*start)++;
-
-		if ((*end & PFN_4G_MASK) == 0)
-			(*end)--;
-
-		next_seg = ALIGN(*start, PFN_4G);
-		if (next_seg - *start <= PFN_16M)
-			*start = next_seg + 1;
-
-		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
-	}
-}
-
 static int
 panfrost_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -489,15 +463,11 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
 	panfrost_priv->pfdev = pfdev;
 	file->driver_priv = panfrost_priv;
 
-	spin_lock_init(&panfrost_priv->mm_lock);
-
-	/* 4G enough for now. can be 48-bit */
-	drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
-	panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
-
-	ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
-	if (ret)
-		goto err_pgtable;
+	panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
+	if (IS_ERR(panfrost_priv->mmu)) {
+		ret = PTR_ERR(panfrost_priv->mmu);
+		goto err_free;
+	}
 
 	ret = panfrost_job_open(panfrost_priv);
 	if (ret)
@@ -506,9 +476,8 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
 	return 0;
 
 err_job:
-	panfrost_mmu_pgtable_free(panfrost_priv);
-err_pgtable:
-	drm_mm_takedown(&panfrost_priv->mm);
+	panfrost_mmu_ctx_put(panfrost_priv->mmu);
+err_free:
 	kfree(panfrost_priv);
 	return ret;
 }
@@ -521,8 +490,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
 	panfrost_perfcnt_close(file);
 	panfrost_job_close(panfrost_priv);
 
-	panfrost_mmu_pgtable_free(panfrost_priv);
-	drm_mm_takedown(&panfrost_priv->mm);
+	panfrost_mmu_ctx_put(panfrost_priv->mmu);
 	kfree(panfrost_priv);
 }
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 57a31dd0ffed1..1d917cea5ceb4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -60,7 +60,7 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
 
 	mutex_lock(&bo->mappings.lock);
 	list_for_each_entry(iter, &bo->mappings.list, node) {
-		if (iter->mmu == &priv->mmu) {
+		if (iter->mmu == priv->mmu) {
 			kref_get(&iter->refcount);
 			mapping = iter;
 			break;
@@ -74,16 +74,13 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
 static void
 panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
 {
-	struct panfrost_file_priv *priv;
-
 	if (mapping->active)
 		panfrost_mmu_unmap(mapping);
 
-	priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
-	spin_lock(&priv->mm_lock);
+	spin_lock(&mapping->mmu->mm_lock);
 	if (drm_mm_node_allocated(&mapping->mmnode))
 		drm_mm_remove_node(&mapping->mmnode);
-	spin_unlock(&priv->mm_lock);
+	spin_unlock(&mapping->mmu->mm_lock);
 }
 
 static void panfrost_gem_mapping_release(struct kref *kref)
@@ -94,6 +91,7 @@ static void panfrost_gem_mapping_release(struct kref *kref)
 
 	panfrost_gem_teardown_mapping(mapping);
 	drm_gem_object_put(&mapping->obj->base.base);
+	panfrost_mmu_ctx_put(mapping->mmu);
 	kfree(mapping);
 }
 
@@ -143,11 +141,11 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 	else
 		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
-	mapping->mmu = &priv->mmu;
-	spin_lock(&priv->mm_lock);
-	ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
+	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
+	spin_lock(&mapping->mmu->mm_lock);
+	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
 					 size >> PAGE_SHIFT, align, color, 0);
-	spin_unlock(&priv->mm_lock);
+	spin_unlock(&mapping->mmu->mm_lock);
 	if (ret)
 		goto err;
 
@@ -176,7 +174,7 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 
 	mutex_lock(&bo->mappings.lock);
 	list_for_each_entry(iter, &bo->mappings.list, node) {
-		if (iter->mmu == &priv->mmu) {
+		if (iter->mmu == priv->mmu) {
 			mapping = iter;
 			list_del(&iter->node);
 			break;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 04e6f6f9b742e..7e1a5664d4525 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -165,7 +165,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 		return;
 	}
 
-	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
+	cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
 
 	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
 	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -524,7 +524,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
 		if (job) {
 			pfdev->jobs[j] = NULL;
 
-			panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
+			panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
 			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
 
 			dma_fence_signal_locked(job->done_fence);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
|
|
index 1986862163178..7fc45b13a52c2 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
|
|
@@ -1,5 +1,8 @@
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
|
|
+
|
|
+#include <drm/panfrost_drm.h>
|
|
+
|
|
#include <linux/atomic.h>
|
|
#include <linux/bitfield.h>
|
|
#include <linux/delay.h>
|
|
@@ -52,25 +55,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
|
|
}
|
|
|
|
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
|
|
- u64 iova, size_t size)
|
|
+ u64 iova, u64 size)
|
|
{
|
|
u8 region_width;
|
|
u64 region = iova & PAGE_MASK;
|
|
- /*
|
|
- * fls returns:
|
|
- * 1 .. 32
|
|
- *
|
|
- * 10 + fls(num_pages)
|
|
- * results in the range (11 .. 42)
|
|
- */
|
|
-
|
|
- size = round_up(size, PAGE_SIZE);
|
|
|
|
- region_width = 10 + fls(size >> PAGE_SHIFT);
|
|
- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
|
|
- /* not pow2, so must go up to the next pow2 */
|
|
- region_width += 1;
|
|
- }
|
|
+ /* The size is encoded as ceil(log2) minus(1), which may be calculated
|
|
+ * with fls. The size must be clamped to hardware bounds.
|
|
+ */
|
|
+ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
|
|
+ region_width = fls64(size - 1) - 1;
|
|
region |= region_width;
|
|
|
|
/* Lock the region that needs to be updated */
|
|
@@ -81,7 +75,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
|
|
|
|
|
|
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
|
|
- u64 iova, size_t size, u32 op)
|
|
+ u64 iova, u64 size, u32 op)
|
|
{
|
|
if (as_nr < 0)
|
|
return 0;
|
|
@@ -98,7 +92,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
|
|
|
|
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
|
|
struct panfrost_mmu *mmu,
|
|
- u64 iova, size_t size, u32 op)
|
|
+ u64 iova, u64 size, u32 op)
|
|
{
|
|
int ret;
|
|
|
|
@@ -115,7 +109,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
|
|
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
|
|
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
|
|
|
|
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
|
|
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
|
|
|
|
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
|
|
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
|
|
@@ -131,7 +125,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
|
|
|
|
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
|
|
{
|
|
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
|
|
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
|
|
|
|
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
|
|
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
|
|
@@ -231,7 +225,7 @@ static size_t get_pgsize(u64 addr, size_t size)
|
|
|
|
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
|
|
struct panfrost_mmu *mmu,
|
|
- u64 iova, size_t size)
|
|
+ u64 iova, u64 size)
|
|
{
|
|
if (mmu->as < 0)
|
|
return;
|
|
@@ -337,7 +331,7 @@ static void mmu_tlb_inv_context_s1(void *cookie)
|
|
|
|
static void mmu_tlb_sync_context(void *cookie)
|
|
{
|
|
- //struct panfrost_device *pfdev = cookie;
|
|
+ //struct panfrost_mmu *mmu = cookie;
|
|
// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
|
|
}
|
|
|
|
@@ -359,57 +353,10 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
|
|
.tlb_flush_leaf = mmu_tlb_flush_leaf,
|
|
};
|
|
|
|
-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
|
|
-{
|
|
- struct panfrost_mmu *mmu = &priv->mmu;
|
|
- struct panfrost_device *pfdev = priv->pfdev;
|
|
-
|
|
- INIT_LIST_HEAD(&mmu->list);
|
|
- mmu->as = -1;
|
|
-
|
|
- mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
|
|
- .pgsize_bitmap = SZ_4K | SZ_2M,
|
|
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
|
|
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
|
|
- .coherent_walk = pfdev->coherent,
|
|
- .tlb = &mmu_tlb_ops,
|
|
- .iommu_dev = pfdev->dev,
|
|
- };
|
|
-
|
|
- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
|
|
- priv);
|
|
- if (!mmu->pgtbl_ops)
|
|
- return -EINVAL;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
|
|
-{
|
|
- struct panfrost_device *pfdev = priv->pfdev;
|
|
- struct panfrost_mmu *mmu = &priv->mmu;
|
|
-
|
|
- spin_lock(&pfdev->as_lock);
|
|
- if (mmu->as >= 0) {
|
|
- pm_runtime_get_noresume(pfdev->dev);
|
|
- if (pm_runtime_active(pfdev->dev))
|
|
- panfrost_mmu_disable(pfdev, mmu->as);
|
|
- pm_runtime_put_autosuspend(pfdev->dev);
|
|
-
|
|
- clear_bit(mmu->as, &pfdev->as_alloc_mask);
|
|
- clear_bit(mmu->as, &pfdev->as_in_use_mask);
|
|
- list_del(&mmu->list);
|
|
- }
|
|
- spin_unlock(&pfdev->as_lock);
|
|
-
|
|
- free_io_pgtable_ops(mmu->pgtbl_ops);
|
|
-}
|
|
-
|
|
static struct panfrost_gem_mapping *
|
|
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
|
|
{
|
|
struct panfrost_gem_mapping *mapping = NULL;
|
|
- struct panfrost_file_priv *priv;
|
|
struct drm_mm_node *node;
|
|
u64 offset = addr >> PAGE_SHIFT;
|
|
struct panfrost_mmu *mmu;
|
|
@@ -422,11 +369,10 @@ addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
|
|
goto out;
|
|
|
|
found_mmu:
|
|
- priv = container_of(mmu, struct panfrost_file_priv, mmu);
|
|
|
|
- spin_lock(&priv->mm_lock);
|
|
+ spin_lock(&mmu->mm_lock);
|
|
|
|
- drm_mm_for_each_node(node, &priv->mm) {
|
|
+ drm_mm_for_each_node(node, &mmu->mm) {
|
|
if (offset >= node->start &&
|
|
offset < (node->start + node->size)) {
|
|
mapping = drm_mm_node_to_panfrost_mapping(node);
|
|
@@ -436,7 +382,7 @@ found_mmu:
|
|
}
|
|
}
|
|
|
|
- spin_unlock(&priv->mm_lock);
|
|
+ spin_unlock(&mmu->mm_lock);
|
|
out:
|
|
spin_unlock(&pfdev->as_lock);
|
|
return mapping;
|
|
@@ -549,6 +495,107 @@ err_bo:
|
|
return ret;
|
|
}
|
|
|
|
+static void panfrost_mmu_release_ctx(struct kref *kref)
|
|
+{
|
|
+ struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
|
|
+ refcount);
|
|
+ struct panfrost_device *pfdev = mmu->pfdev;
|
|
+
|
|
+ spin_lock(&pfdev->as_lock);
|
|
+ if (mmu->as >= 0) {
|
|
+ pm_runtime_get_noresume(pfdev->dev);
|
|
+ if (pm_runtime_active(pfdev->dev))
|
|
+ panfrost_mmu_disable(pfdev, mmu->as);
|
|
+ pm_runtime_put_autosuspend(pfdev->dev);
|
|
+
|
|
+ clear_bit(mmu->as, &pfdev->as_alloc_mask);
|
|
+ clear_bit(mmu->as, &pfdev->as_in_use_mask);
|
|
+ list_del(&mmu->list);
|
|
+ }
|
|
+ spin_unlock(&pfdev->as_lock);
|
|
+
|
|
+ free_io_pgtable_ops(mmu->pgtbl_ops);
|
|
+ drm_mm_takedown(&mmu->mm);
|
|
+ kfree(mmu);
|
|
+}
|
|
+
|
|
+void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
|
|
+{
|
|
+ kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
|
|
+}
|
|
+
|
|
+struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
|
|
+{
|
|
+ kref_get(&mmu->refcount);
|
|
+
|
|
+ return mmu;
|
|
+}
|
|
+
|
|
+#define PFN_4G (SZ_4G >> PAGE_SHIFT)
|
|
+#define PFN_4G_MASK (PFN_4G - 1)
|
|
+#define PFN_16M (SZ_16M >> PAGE_SHIFT)
|
|
+
|
|
+static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
|
|
+ unsigned long color,
|
|
+ u64 *start, u64 *end)
|
|
+{
|
|
+ /* Executable buffers can't start or end on a 4GB boundary */
|
|
+ if (!(color & PANFROST_BO_NOEXEC)) {
|
|
+ u64 next_seg;
|
|
+
|
|
+ if ((*start & PFN_4G_MASK) == 0)
|
|
+ (*start)++;
|
|
+
|
|
+ if ((*end & PFN_4G_MASK) == 0)
|
|
+ (*end)--;
|
|
+
|
|
+ next_seg = ALIGN(*start, PFN_4G);
|
|
+ if (next_seg - *start <= PFN_16M)
|
|
+ *start = next_seg + 1;
|
|
+
|
|
+ *end = min(*end, ALIGN(*start, PFN_4G) - 1);
|
|
+ }
|
|
+}
|
|
+
|
|
+struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
|
|
+{
|
|
+ struct panfrost_mmu *mmu;
|
|
+
|
|
+ mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
|
|
+ if (!mmu)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ mmu->pfdev = pfdev;
|
|
+ spin_lock_init(&mmu->mm_lock);
|
|
+
|
|
+ /* 4G enough for now. can be 48-bit */
|
|
+ drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
|
|
+ mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;
|
|
+
|
|
+ INIT_LIST_HEAD(&mmu->list);
|
|
+ mmu->as = -1;
|
|
+
|
|
+ mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
|
|
+ .pgsize_bitmap = SZ_4K | SZ_2M,
|
|
+ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
|
|
+ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
|
|
+ .coherent_walk = pfdev->coherent,
|
|
+ .tlb = &mmu_tlb_ops,
|
|
+ .iommu_dev = pfdev->dev,
|
|
+ };
|
|
+
|
|
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
|
|
+ mmu);
|
|
+ if (!mmu->pgtbl_ops) {
|
|
+ kfree(mmu);
|
|
+ return ERR_PTR(-EINVAL);
|
|
+ }
|
|
+
|
|
+ kref_init(&mmu->refcount);
|
|
+
|
|
+ return mmu;
|
|
+}
|
|
+
|
|
static const char *access_type_name(struct panfrost_device *pfdev,
|
|
u32 fault_status)
|
|
{
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
|
|
index 44fc2edf63ce6..cc2a0d307febc 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
|
|
@@ -18,7 +18,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev);
|
|
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
|
|
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
|
|
|
|
-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
|
|
-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
|
|
+struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu);
|
|
+void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu);
|
|
+struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev);
|
|
|
|
#endif
|
|
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
|
|
index eddaa62ad8b0e..2ae3a4d301d39 100644
|
|
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
|
|
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
|
|
@@ -318,6 +318,8 @@
|
|
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
|
|
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
|
|
|
|
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
|
|
+
|
|
#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
|
|
#define gpu_read(dev, reg) readl(dev->iomem + reg)
|
|
|
|
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
|
|
index c58b8840090ab..ee293f061f0a8 100644
|
|
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
|
|
@@ -1074,7 +1074,9 @@ static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
|
|
HDMI_WRITE(HDMI_MAI_CTL,
|
|
VC4_SET_FIELD(vc4_hdmi->audio.channels,
|
|
VC4_HD_MAI_CTL_CHNUM) |
|
|
- VC4_HD_MAI_CTL_ENABLE);
|
|
+ VC4_HD_MAI_CTL_WHOLSMP |
|
|
+ VC4_HD_MAI_CTL_CHALIGN |
|
|
+ VC4_HD_MAI_CTL_ENABLE);
|
|
break;
|
|
case SNDRV_PCM_TRIGGER_STOP:
|
|
HDMI_WRITE(HDMI_MAI_CTL,
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
|
|
index f493b20c7a38c..f1a51371de5b1 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
|
|
@@ -866,7 +866,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
|
user_srf->prime.base.shareable = false;
|
|
user_srf->prime.base.tfile = NULL;
|
|
if (drm_is_primary_client(file_priv))
|
|
- user_srf->master = drm_master_get(file_priv->master);
|
|
+ user_srf->master = drm_file_get_master(file_priv);
|
|
|
|
/**
|
|
* From this point, the generic resource management functions
|
|
@@ -1537,7 +1537,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
|
|
|
|
user_srf = container_of(srf, struct vmw_user_surface, srf);
|
|
if (drm_is_primary_client(file_priv))
|
|
- user_srf->master = drm_master_get(file_priv->master);
|
|
+ user_srf->master = drm_file_get_master(file_priv);
|
|
|
|
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
|
|
if (unlikely(ret != 0))
|
|
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
|
|
index 8cd8af35cfaac..205c72a249b75 100644
|
|
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
|
|
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
|
|
@@ -1447,9 +1447,10 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
|
|
struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
|
|
int ret, vrefresh;
|
|
|
|
+ pm_runtime_get_sync(disp->dev);
|
|
+
|
|
zynqmp_disp_crtc_setup_clock(crtc, adjusted_mode);
|
|
|
|
- pm_runtime_get_sync(disp->dev);
|
|
ret = clk_prepare_enable(disp->pclk);
|
|
if (ret) {
|
|
dev_err(disp->dev, "failed to enable a pixel clock\n");
|
|
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
|
|
index 59d1fb017da01..13811332b349f 100644
|
|
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
|
|
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
|
|
@@ -402,10 +402,6 @@ static int zynqmp_dp_phy_init(struct zynqmp_dp *dp)
|
|
}
|
|
}
|
|
|
|
- ret = zynqmp_dp_reset(dp, false);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
-
|
|
zynqmp_dp_clr(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
|
|
|
|
/*
|
|
@@ -441,8 +437,6 @@ static void zynqmp_dp_phy_exit(struct zynqmp_dp *dp)
|
|
ret);
|
|
}
|
|
|
|
- zynqmp_dp_reset(dp, true);
|
|
-
|
|
for (i = 0; i < dp->num_lanes; i++) {
|
|
ret = phy_exit(dp->phy[i]);
|
|
if (ret)
|
|
@@ -1682,9 +1676,13 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
|
|
return PTR_ERR(dp->reset);
|
|
}
|
|
|
|
+ ret = zynqmp_dp_reset(dp, false);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
ret = zynqmp_dp_phy_probe(dp);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_reset;
|
|
|
|
/* Initialize the hardware. */
|
|
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
|
|
@@ -1696,7 +1694,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
|
|
|
|
ret = zynqmp_dp_phy_init(dp);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_reset;
|
|
|
|
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 1);
|
|
|
|
@@ -1708,15 +1706,18 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
|
|
zynqmp_dp_irq_handler, IRQF_ONESHOT,
|
|
dev_name(dp->dev), dp);
|
|
if (ret < 0)
|
|
- goto error;
|
|
+ goto err_phy_exit;
|
|
|
|
dev_dbg(dp->dev, "ZynqMP DisplayPort Tx probed with %u lanes\n",
|
|
dp->num_lanes);
|
|
|
|
return 0;
|
|
|
|
-error:
|
|
+err_phy_exit:
|
|
zynqmp_dp_phy_exit(dp);
|
|
+err_reset:
|
|
+ zynqmp_dp_reset(dp, true);
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -1734,4 +1735,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
|
|
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
|
|
|
|
zynqmp_dp_phy_exit(dp);
|
|
+ zynqmp_dp_reset(dp, true);
|
|
}
|
|
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
|
|
index d1ab2dccf6fd7..580d378342c41 100644
|
|
--- a/drivers/hid/hid-input.c
|
|
+++ b/drivers/hid/hid-input.c
|
|
@@ -415,8 +415,6 @@ static int hidinput_get_battery_property(struct power_supply *psy,
|
|
|
|
if (dev->battery_status == HID_BATTERY_UNKNOWN)
|
|
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
|
|
- else if (dev->battery_capacity == 100)
|
|
- val->intval = POWER_SUPPLY_STATUS_FULL;
|
|
else
|
|
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
|
|
break;
|
|
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
index 1f08c848c33de..998aad8a9e608 100644
|
|
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
|
|
@@ -176,8 +176,6 @@ static const struct i2c_hid_quirks {
|
|
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
|
{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
|
|
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
|
- { USB_VENDOR_ID_ELAN, HID_ANY_ID,
|
|
- I2C_HID_QUIRK_BOGUS_IRQ },
|
|
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
|
|
I2C_HID_QUIRK_RESET_ON_RESUME },
|
|
{ I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
|
|
@@ -188,7 +186,8 @@ static const struct i2c_hid_quirks {
|
|
* Sending the wakeup after reset actually break ELAN touchscreen controller
|
|
*/
|
|
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
|
|
- I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
|
|
+ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
|
|
+ I2C_HID_QUIRK_BOGUS_IRQ },
|
|
{ 0, 0 }
|
|
};
|
|
|
|
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
|
|
index 2fb7540ee952b..79bc2032dcb2a 100644
|
|
--- a/drivers/hwmon/pmbus/ibm-cffps.c
|
|
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
|
|
@@ -50,9 +50,9 @@
|
|
#define CFFPS_MFR_VAUX_FAULT BIT(6)
|
|
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
|
|
|
|
-#define CFFPS_LED_BLINK BIT(0)
|
|
-#define CFFPS_LED_ON BIT(1)
|
|
-#define CFFPS_LED_OFF BIT(2)
|
|
+#define CFFPS_LED_BLINK (BIT(0) | BIT(6))
|
|
+#define CFFPS_LED_ON (BIT(1) | BIT(6))
|
|
+#define CFFPS_LED_OFF (BIT(2) | BIT(6))
|
|
#define CFFPS_BLINK_RATE_MS 250
|
|
|
|
enum {
|
|
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
|
|
index 2b2b8edfd258c..ab4997bfd6d45 100644
|
|
--- a/drivers/iio/dac/ad5624r_spi.c
|
|
+++ b/drivers/iio/dac/ad5624r_spi.c
|
|
@@ -229,7 +229,7 @@ static int ad5624r_probe(struct spi_device *spi)
|
|
if (!indio_dev)
|
|
return -ENOMEM;
|
|
st = iio_priv(indio_dev);
|
|
- st->reg = devm_regulator_get(&spi->dev, "vcc");
|
|
+ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
|
|
if (!IS_ERR(st->reg)) {
|
|
ret = regulator_enable(st->reg);
|
|
if (ret)
|
|
@@ -240,6 +240,22 @@ static int ad5624r_probe(struct spi_device *spi)
|
|
goto error_disable_reg;
|
|
|
|
voltage_uv = ret;
|
|
+ } else {
|
|
+ if (PTR_ERR(st->reg) != -ENODEV)
|
|
+ return PTR_ERR(st->reg);
|
|
+ /* Backwards compatibility. This naming is not correct */
|
|
+ st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
|
|
+ if (!IS_ERR(st->reg)) {
|
|
+ ret = regulator_enable(st->reg);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = regulator_get_voltage(st->reg);
|
|
+ if (ret < 0)
|
|
+ goto error_disable_reg;
|
|
+
|
|
+ voltage_uv = ret;
|
|
+ }
|
|
}
|
|
|
|
spi_set_drvdata(spi, indio_dev);
|
|
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
|
|
index 3b5ba26d7d867..3b4a0e60e6059 100644
|
|
--- a/drivers/iio/temperature/ltc2983.c
|
|
+++ b/drivers/iio/temperature/ltc2983.c
|
|
@@ -89,6 +89,8 @@
|
|
|
|
#define LTC2983_STATUS_START_MASK BIT(7)
|
|
#define LTC2983_STATUS_START(x) FIELD_PREP(LTC2983_STATUS_START_MASK, x)
|
|
+#define LTC2983_STATUS_UP_MASK GENMASK(7, 6)
|
|
+#define LTC2983_STATUS_UP(reg) FIELD_GET(LTC2983_STATUS_UP_MASK, reg)
|
|
|
|
#define LTC2983_STATUS_CHAN_SEL_MASK GENMASK(4, 0)
|
|
#define LTC2983_STATUS_CHAN_SEL(x) \
|
|
@@ -1362,17 +1364,16 @@ put_child:
|
|
|
|
static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
|
|
{
|
|
- u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0;
|
|
+ u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0, status;
|
|
int ret;
|
|
- unsigned long time;
|
|
-
|
|
- /* make sure the device is up */
|
|
- time = wait_for_completion_timeout(&st->completion,
|
|
- msecs_to_jiffies(250));
|
|
|
|
- if (!time) {
|
|
+ /* make sure the device is up: start bit (7) is 0 and done bit (6) is 1 */
|
|
+ ret = regmap_read_poll_timeout(st->regmap, LTC2983_STATUS_REG, status,
|
|
+ LTC2983_STATUS_UP(status) == 1, 25000,
|
|
+ 25000 * 10);
|
|
+ if (ret) {
|
|
dev_err(&st->spi->dev, "Device startup timed out\n");
|
|
- return -ETIMEDOUT;
|
|
+ return ret;
|
|
}
|
|
|
|
st->iio_chan = devm_kzalloc(&st->spi->dev,
|
|
@@ -1492,10 +1493,11 @@ static int ltc2983_probe(struct spi_device *spi)
|
|
ret = ltc2983_parse_dt(st);
|
|
if (ret)
|
|
return ret;
|
|
- /*
|
|
- * let's request the irq now so it is used to sync the device
|
|
- * startup in ltc2983_setup()
|
|
- */
|
|
+
|
|
+ ret = ltc2983_setup(st, true);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
|
|
IRQF_TRIGGER_RISING, name, st);
|
|
if (ret) {
|
|
@@ -1503,10 +1505,6 @@ static int ltc2983_probe(struct spi_device *spi)
|
|
return ret;
|
|
}
|
|
|
|
- ret = ltc2983_setup(st, true);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
indio_dev->name = name;
|
|
indio_dev->num_channels = st->iio_channels;
|
|
indio_dev->channels = st->iio_chan;
|
|
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
|
|
index da8adadf47559..75b6da00065a3 100644
|
|
--- a/drivers/infiniband/core/iwcm.c
|
|
+++ b/drivers/infiniband/core/iwcm.c
|
|
@@ -1187,29 +1187,34 @@ static int __init iw_cm_init(void)
|
|
|
|
ret = iwpm_init(RDMA_NL_IWCM);
|
|
if (ret)
|
|
- pr_err("iw_cm: couldn't init iwpm\n");
|
|
- else
|
|
- rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
|
|
+ return ret;
|
|
+
|
|
iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
|
|
if (!iwcm_wq)
|
|
- return -ENOMEM;
|
|
+ goto err_alloc;
|
|
|
|
iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
|
|
iwcm_ctl_table);
|
|
if (!iwcm_ctl_table_hdr) {
|
|
pr_err("iw_cm: couldn't register sysctl paths\n");
|
|
- destroy_workqueue(iwcm_wq);
|
|
- return -ENOMEM;
|
|
+ goto err_sysctl;
|
|
}
|
|
|
|
+ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
|
|
return 0;
|
|
+
|
|
+err_sysctl:
|
|
+ destroy_workqueue(iwcm_wq);
|
|
+err_alloc:
|
|
+ iwpm_exit(RDMA_NL_IWCM);
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
static void __exit iw_cm_cleanup(void)
|
|
{
|
|
+ rdma_nl_unregister(RDMA_NL_IWCM);
|
|
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
|
|
destroy_workqueue(iwcm_wq);
|
|
- rdma_nl_unregister(RDMA_NL_IWCM);
|
|
iwpm_exit(RDMA_NL_IWCM);
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
|
|
index 4e940fc50bba6..2ece682c7835b 100644
|
|
--- a/drivers/infiniband/hw/efa/efa_verbs.c
|
|
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
|
|
@@ -717,7 +717,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
|
|
|
|
qp->qp_handle = create_qp_resp.qp_handle;
|
|
qp->ibqp.qp_num = create_qp_resp.qp_num;
|
|
- qp->ibqp.qp_type = init_attr->qp_type;
|
|
qp->max_send_wr = init_attr->cap.max_send_wr;
|
|
qp->max_recv_wr = init_attr->cap.max_recv_wr;
|
|
qp->max_send_sge = init_attr->cap.max_send_sge;
|
|
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
|
|
index 786c6316273f7..b6e453e9ba236 100644
|
|
--- a/drivers/infiniband/hw/hfi1/init.c
|
|
+++ b/drivers/infiniband/hw/hfi1/init.c
|
|
@@ -650,12 +650,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
|
|
|
|
ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
|
|
ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
|
|
-
|
|
- if (loopback) {
|
|
- dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
|
|
- !default_pkey_idx);
|
|
- ppd->pkeys[!default_pkey_idx] = 0x8001;
|
|
- }
|
|
+ ppd->pkeys[0] = 0x8001;
|
|
|
|
INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
|
|
INIT_WORK(&ppd->link_up_work, handle_link_up);
|
|
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
|
|
index ef1452215b17d..7ce9ad8aee1ec 100644
|
|
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
|
|
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
|
|
@@ -740,7 +740,6 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
|
|
goto err_out;
|
|
}
|
|
hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
|
|
- resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
|
|
}
|
|
|
|
if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
|
|
@@ -752,7 +751,6 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
|
|
goto err_sdb;
|
|
}
|
|
hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
|
|
- resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
|
|
}
|
|
} else {
|
|
/* QP doorbell register address */
|
|
@@ -959,6 +957,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
|
|
}
|
|
|
|
if (udata) {
|
|
+ resp.cap_flags = hr_qp->en_flags;
|
|
ret = ib_copy_to_udata(udata, &resp,
|
|
min(udata->outlen, sizeof(resp)));
|
|
if (ret) {
|
|
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
|
|
index 8beba002e5dd7..011477356a1de 100644
|
|
--- a/drivers/infiniband/hw/mlx5/qp.c
|
|
+++ b/drivers/infiniband/hw/mlx5/qp.c
|
|
@@ -1842,7 +1842,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
|
|
static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
|
|
struct mlx5_create_qp_params *params)
|
|
{
|
|
- struct mlx5_ib_create_qp *ucmd = params->ucmd;
|
|
struct ib_qp_init_attr *attr = params->attr;
|
|
u32 uidx = params->uidx;
|
|
struct mlx5_ib_resources *devr = &dev->devr;
|
|
@@ -1862,8 +1861,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
|
|
if (!in)
|
|
return -ENOMEM;
|
|
|
|
- if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
|
|
- MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
|
|
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
|
|
|
|
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
|
|
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
|
|
index 30cb30046b15e..35963e6bf9fab 100644
|
|
--- a/drivers/iommu/intel/pasid.h
|
|
+++ b/drivers/iommu/intel/pasid.h
|
|
@@ -28,12 +28,12 @@
|
|
#define VCMD_CMD_ALLOC 0x1
|
|
#define VCMD_CMD_FREE 0x2
|
|
#define VCMD_VRSP_IP 0x1
|
|
-#define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3)
|
|
+#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
|
|
#define VCMD_VRSP_SC_SUCCESS 0
|
|
-#define VCMD_VRSP_SC_NO_PASID_AVAIL 2
|
|
-#define VCMD_VRSP_SC_INVALID_PASID 2
|
|
-#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
|
|
-#define VCMD_CMD_OPERAND(e) ((e) << 8)
|
|
+#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
|
|
+#define VCMD_VRSP_SC_INVALID_PASID 16
|
|
+#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
|
|
+#define VCMD_CMD_OPERAND(e) ((e) << 16)
|
|
/*
|
|
* Domain ID reserved for pasid entries programmed for first-level
|
|
* only and pass-through transfer modes.
|
|
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
|
|
index 5665b6ea8119f..75378e35c3d66 100644
|
|
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
|
|
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
|
|
@@ -168,7 +168,8 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
|
|
dma_sync_single_for_cpu(dev, prev_task->pa_base,
|
|
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
|
|
prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
|
|
- (u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
|
|
+ (u64)CMDQ_JUMP_BY_PA << 32 |
|
|
+ (task->pa_base >> task->cmdq->shift_pa);
|
|
dma_sync_single_for_device(dev, prev_task->pa_base,
|
|
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
|
|
|
|
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
|
|
index 70ae6f3aede94..2aa4acd33af39 100644
|
|
--- a/drivers/md/dm-crypt.c
|
|
+++ b/drivers/md/dm-crypt.c
|
|
@@ -2643,7 +2643,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
|
|
struct crypt_config *cc = pool_data;
|
|
struct page *page;
|
|
|
|
- if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
|
|
+ /*
|
|
+ * Note, percpu_counter_read_positive() may over (and under) estimate
|
|
+ * the current usage by at most (batch - 1) * num_online_cpus() pages,
|
|
+ * but avoids potential spinlock contention of an exact result.
|
|
+ */
|
|
+ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
|
|
likely(gfp_mask & __GFP_NORETRY))
|
|
return NULL;
|
|
|
|
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
|
|
index ea4b1ebfca991..0ffd89712536b 100644
|
|
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
|
|
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
|
|
@@ -305,14 +305,16 @@ static int stm32_cec_probe(struct platform_device *pdev)
|
|
|
|
cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec");
|
|
if (IS_ERR(cec->clk_hdmi_cec) &&
|
|
- PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER)
|
|
- return -EPROBE_DEFER;
|
|
+ PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER) {
|
|
+ ret = -EPROBE_DEFER;
|
|
+ goto err_unprepare_cec_clk;
|
|
+ }
|
|
|
|
if (!IS_ERR(cec->clk_hdmi_cec)) {
|
|
ret = clk_prepare(cec->clk_hdmi_cec);
|
|
if (ret) {
|
|
dev_err(&pdev->dev, "Can't prepare hdmi-cec clock\n");
|
|
- return ret;
|
|
+ goto err_unprepare_cec_clk;
|
|
}
|
|
}
|
|
|
|
@@ -324,19 +326,27 @@ static int stm32_cec_probe(struct platform_device *pdev)
|
|
CEC_NAME, caps, CEC_MAX_LOG_ADDRS);
|
|
ret = PTR_ERR_OR_ZERO(cec->adap);
|
|
if (ret)
|
|
- return ret;
|
|
+ goto err_unprepare_hdmi_cec_clk;
|
|
|
|
ret = cec_register_adapter(cec->adap, &pdev->dev);
|
|
- if (ret) {
|
|
- cec_delete_adapter(cec->adap);
|
|
- return ret;
|
|
- }
|
|
+ if (ret)
|
|
+ goto err_delete_adapter;
|
|
|
|
cec_hw_init(cec);
|
|
|
|
platform_set_drvdata(pdev, cec);
|
|
|
|
return 0;
|
|
+
|
|
+err_delete_adapter:
|
|
+ cec_delete_adapter(cec->adap);
|
|
+
|
|
+err_unprepare_hdmi_cec_clk:
|
|
+ clk_unprepare(cec->clk_hdmi_cec);
|
|
+
|
|
+err_unprepare_cec_clk:
|
|
+ clk_unprepare(cec->clk_cec);
|
|
+ return ret;
|
|
}
|
|
|
|
static int stm32_cec_remove(struct platform_device *pdev)
|
|
diff --git a/drivers/media/cec/platform/tegra/tegra_cec.c b/drivers/media/cec/platform/tegra/tegra_cec.c
|
|
index 1ac0c70a59818..5e907395ca2e5 100644
|
|
--- a/drivers/media/cec/platform/tegra/tegra_cec.c
|
|
+++ b/drivers/media/cec/platform/tegra/tegra_cec.c
|
|
@@ -366,7 +366,11 @@ static int tegra_cec_probe(struct platform_device *pdev)
|
|
return -ENOENT;
|
|
}
|
|
|
|
- clk_prepare_enable(cec->clk);
|
|
+ ret = clk_prepare_enable(cec->clk);
|
|
+ if (ret) {
|
|
+ dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
|
|
+ return ret;
|
|
+ }
|
|
|
|
/* set context info. */
|
|
cec->dev = &pdev->dev;
|
|
@@ -446,9 +450,7 @@ static int tegra_cec_resume(struct platform_device *pdev)
|
|
|
|
dev_notice(&pdev->dev, "Resuming\n");
|
|
|
|
- clk_prepare_enable(cec->clk);
|
|
-
|
|
- return 0;
|
|
+ return clk_prepare_enable(cec->clk);
|
|
}
|
|
#endif
|
|
|
|
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
|
|
index 082796534b0ae..bb02354a48b81 100644
|
|
--- a/drivers/media/dvb-frontends/dib8000.c
|
|
+++ b/drivers/media/dvb-frontends/dib8000.c
|
|
@@ -2107,32 +2107,55 @@ static void dib8000_load_ana_fe_coefs(struct dib8000_state *state, const s16 *an
|
|
dib8000_write_word(state, 117 + mode, ana_fe[mode]);
|
|
}
|
|
|
|
-static const u16 lut_prbs_2k[14] = {
|
|
- 0, 0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF, 0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
|
|
+static const u16 lut_prbs_2k[13] = {
|
|
+ 0x423, 0x009, 0x5C7,
|
|
+ 0x7A6, 0x3D8, 0x527,
|
|
+ 0x7FF, 0x79B, 0x3D6,
|
|
+ 0x3A2, 0x53B, 0x2F4,
|
|
+ 0x213
|
|
};
|
|
-static const u16 lut_prbs_4k[14] = {
|
|
- 0, 0x208, 0x0C3, 0x7B9, 0x423, 0x5C7, 0x3D8, 0x7FF, 0x3D6, 0x53B, 0x213, 0x029, 0x0D0, 0x48E
|
|
+
|
|
+static const u16 lut_prbs_4k[13] = {
|
|
+ 0x208, 0x0C3, 0x7B9,
|
|
+ 0x423, 0x5C7, 0x3D8,
|
|
+ 0x7FF, 0x3D6, 0x53B,
|
|
+ 0x213, 0x029, 0x0D0,
|
|
+ 0x48E
|
|
};
|
|
-static const u16 lut_prbs_8k[14] = {
|
|
- 0, 0x740, 0x069, 0x7DD, 0x208, 0x7B9, 0x5C7, 0x7FF, 0x53B, 0x029, 0x48E, 0x4C4, 0x367, 0x684
|
|
+
|
|
+static const u16 lut_prbs_8k[13] = {
|
|
+ 0x740, 0x069, 0x7DD,
|
|
+ 0x208, 0x7B9, 0x5C7,
|
|
+ 0x7FF, 0x53B, 0x029,
|
|
+ 0x48E, 0x4C4, 0x367,
|
|
+ 0x684
|
|
};
|
|
|
|
static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
|
|
{
|
|
int sub_channel_prbs_group = 0;
|
|
+ int prbs_group;
|
|
|
|
- sub_channel_prbs_group = (subchannel / 3) + 1;
|
|
- dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
|
|
+ sub_channel_prbs_group = subchannel / 3;
|
|
+ if (sub_channel_prbs_group >= ARRAY_SIZE(lut_prbs_2k))
|
|
+ return 0;
|
|
|
|
switch (state->fe[0]->dtv_property_cache.transmission_mode) {
|
|
case TRANSMISSION_MODE_2K:
|
|
- return lut_prbs_2k[sub_channel_prbs_group];
|
|
+ prbs_group = lut_prbs_2k[sub_channel_prbs_group];
|
|
+ break;
|
|
case TRANSMISSION_MODE_4K:
|
|
- return lut_prbs_4k[sub_channel_prbs_group];
|
|
+ prbs_group = lut_prbs_4k[sub_channel_prbs_group];
|
|
+ break;
|
|
default:
|
|
case TRANSMISSION_MODE_8K:
|
|
- return lut_prbs_8k[sub_channel_prbs_group];
|
|
+ prbs_group = lut_prbs_8k[sub_channel_prbs_group];
|
|
}
|
|
+
|
|
+ dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n",
|
|
+ sub_channel_prbs_group, subchannel, prbs_group);
|
|
+
|
|
+ return prbs_group;
|
|
}
|
|
|
|
static void dib8000_set_13seg_channel(struct dib8000_state *state)
|
|
@@ -2409,10 +2432,8 @@ static void dib8000_set_isdbt_common_channel(struct dib8000_state *state, u8 seq
|
|
/* TSB or ISDBT ? apply it now */
|
|
if (c->isdbt_sb_mode) {
|
|
dib8000_set_sb_channel(state);
|
|
- if (c->isdbt_sb_subchannel < 14)
|
|
- init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel);
|
|
- else
|
|
- init_prbs = 0;
|
|
+ init_prbs = dib8000_get_init_prbs(state,
|
|
+ c->isdbt_sb_subchannel);
|
|
} else {
|
|
dib8000_set_13seg_channel(state);
|
|
init_prbs = 0xfff;
|
|
@@ -3004,6 +3025,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
|
|
|
|
unsigned long *timeout = &state->timeout;
|
|
unsigned long now = jiffies;
|
|
+ u16 init_prbs;
|
|
#ifdef DIB8000_AGC_FREEZE
|
|
u16 agc1, agc2;
|
|
#endif
|
|
@@ -3302,8 +3324,10 @@ static int dib8000_tune(struct dvb_frontend *fe)
|
|
break;
|
|
|
|
case CT_DEMOD_STEP_11: /* 41 : init prbs autosearch */
|
|
- if (state->subchannel <= 41) {
|
|
- dib8000_set_subchannel_prbs(state, dib8000_get_init_prbs(state, state->subchannel));
|
|
+ init_prbs = dib8000_get_init_prbs(state, state->subchannel);
|
|
+
|
|
+ if (init_prbs) {
|
|
+ dib8000_set_subchannel_prbs(state, init_prbs);
|
|
*tune_state = CT_DEMOD_STEP_9;
|
|
} else {
|
|
*tune_state = CT_DEMOD_STOP;
|
|
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
|
|
index ccb55fd1d506f..e6104ee97ed29 100644
|
|
--- a/drivers/media/i2c/imx258.c
|
|
+++ b/drivers/media/i2c/imx258.c
|
|
@@ -22,7 +22,7 @@
|
|
#define IMX258_CHIP_ID 0x0258
|
|
|
|
/* V_TIMING internal */
|
|
-#define IMX258_VTS_30FPS 0x0c98
|
|
+#define IMX258_VTS_30FPS 0x0c50
|
|
#define IMX258_VTS_30FPS_2K 0x0638
|
|
#define IMX258_VTS_30FPS_VGA 0x034c
|
|
#define IMX258_VTS_MAX 0xffff
|
|
@@ -46,7 +46,7 @@
|
|
/* Analog gain control */
|
|
#define IMX258_REG_ANALOG_GAIN 0x0204
|
|
#define IMX258_ANA_GAIN_MIN 0
|
|
-#define IMX258_ANA_GAIN_MAX 0x1fff
|
|
+#define IMX258_ANA_GAIN_MAX 480
|
|
#define IMX258_ANA_GAIN_STEP 1
|
|
#define IMX258_ANA_GAIN_DEFAULT 0x0
|
|
|
|
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
|
|
index 9554c8348c020..17cc69c3227f8 100644
|
|
--- a/drivers/media/i2c/tda1997x.c
|
|
+++ b/drivers/media/i2c/tda1997x.c
|
|
@@ -1695,14 +1695,15 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
|
|
struct v4l2_dv_timings *timings)
|
|
{
|
|
struct tda1997x_state *state = to_state(sd);
|
|
+ int ret;
|
|
|
|
v4l_dbg(1, debug, state->client, "%s\n", __func__);
|
|
memset(timings, 0, sizeof(struct v4l2_dv_timings));
|
|
mutex_lock(&state->lock);
|
|
- tda1997x_detect_std(state, timings);
|
|
+ ret = tda1997x_detect_std(state, timings);
|
|
mutex_unlock(&state->lock);
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
|
|
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
|
|
index 1ba3f96ffa7dc..40ab66c850f23 100644
|
|
--- a/drivers/media/rc/rc-loopback.c
|
|
+++ b/drivers/media/rc/rc-loopback.c
|
|
@@ -42,7 +42,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
|
|
|
|
if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) {
|
|
dprintk("invalid tx mask: %u\n", mask);
|
|
- return -EINVAL;
|
|
+ return 2;
|
|
}
|
|
|
|
dprintk("setting tx mask: %u\n", mask);
|
|
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
|
|
index c7172b8952a96..5f0e2fa69da5c 100644
|
|
--- a/drivers/media/usb/uvc/uvc_v4l2.c
|
|
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
|
|
@@ -898,8 +898,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
|
|
{
|
|
struct uvc_fh *handle = fh;
|
|
struct uvc_video_chain *chain = handle->chain;
|
|
+ u8 *buf;
|
|
int ret;
|
|
- u8 i;
|
|
|
|
if (chain->selector == NULL ||
|
|
(chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
|
|
@@ -907,22 +907,27 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
|
|
return 0;
|
|
}
|
|
|
|
+ buf = kmalloc(1, GFP_KERNEL);
|
|
+ if (!buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
|
|
chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
|
|
- &i, 1);
|
|
- if (ret < 0)
|
|
- return ret;
|
|
+ buf, 1);
|
|
+ if (!ret)
|
|
+ *input = *buf - 1;
|
|
|
|
- *input = i - 1;
|
|
- return 0;
|
|
+ kfree(buf);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
|
|
{
|
|
struct uvc_fh *handle = fh;
|
|
struct uvc_video_chain *chain = handle->chain;
|
|
+ u8 *buf;
|
|
int ret;
|
|
- u32 i;
|
|
|
|
ret = uvc_acquire_privileges(handle);
|
|
if (ret < 0)
|
|
@@ -938,10 +943,17 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
|
|
if (input >= chain->selector->bNrInPins)
|
|
return -EINVAL;
|
|
|
|
- i = input + 1;
|
|
- return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
|
|
- chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
|
|
- &i, 1);
|
|
+ buf = kmalloc(1, GFP_KERNEL);
|
|
+ if (!buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ *buf = input + 1;
|
|
+ ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
|
|
+ chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
|
|
+ buf, 1);
|
|
+ kfree(buf);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static int uvc_ioctl_queryctrl(struct file *file, void *fh,
|
|
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
|
|
index 230d65a642178..af48705c704f8 100644
|
|
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
|
|
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
|
|
@@ -196,7 +196,7 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
|
|
if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
|
|
return false;
|
|
|
|
- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
|
|
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
|
|
if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
|
|
fnc, fnc_handle) &&
|
|
v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
|
|
@@ -218,7 +218,7 @@ bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
|
|
{
|
|
unsigned int i;
|
|
|
|
- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
|
|
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
|
|
const struct v4l2_bt_timings *bt =
|
|
&v4l2_dv_timings_presets[i].bt;
|
|
|
|
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
|
|
index c2338750313c4..a49782dd903cd 100644
|
|
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
|
|
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
|
|
@@ -2238,7 +2238,8 @@ int vmci_qp_broker_map(struct vmci_handle handle,
|
|
|
|
result = VMCI_SUCCESS;
|
|
|
|
- if (context_id != VMCI_HOST_CONTEXT_ID) {
|
|
+ if (context_id != VMCI_HOST_CONTEXT_ID &&
|
|
+ !QPBROKERSTATE_HAS_MEM(entry)) {
|
|
struct vmci_qp_page_store page_store;
|
|
|
|
page_store.pages = guest_mem;
|
|
@@ -2345,7 +2346,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
|
|
goto out;
|
|
}
|
|
|
|
- if (context_id != VMCI_HOST_CONTEXT_ID) {
|
|
+ if (context_id != VMCI_HOST_CONTEXT_ID &&
|
|
+ QPBROKERSTATE_HAS_MEM(entry)) {
|
|
qp_acquire_queue_mutex(entry->produce_q);
|
|
result = qp_save_headers(entry);
|
|
if (result < VMCI_SUCCESS)
|
|
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
|
|
index 87bac99207023..94caee49da99c 100644
|
|
--- a/drivers/mmc/core/block.c
|
|
+++ b/drivers/mmc/core/block.c
|
|
@@ -541,6 +541,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
|
return mmc_sanitize(card);
|
|
|
|
mmc_wait_for_req(card->host, &mrq);
|
|
+ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
|
|
|
|
if (cmd.error) {
|
|
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
|
|
@@ -590,8 +591,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
|
if (idata->ic.postsleep_min_us)
|
|
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
|
|
|
|
- memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
|
|
-
|
|
if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
|
|
/*
|
|
* Ensure RPMB/R1B command has completed by polling CMD13
|
|
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
|
|
index eb395e1442071..e00167bcfaf6d 100644
|
|
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
|
|
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
|
|
@@ -539,9 +539,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
|
|
return 0;
|
|
}
|
|
|
|
+static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
|
|
+{
|
|
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
|
|
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
|
|
+}
|
|
+
|
|
+static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
|
|
+{
|
|
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
|
|
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
|
|
+}
|
|
+
|
|
static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
|
|
{
|
|
struct mmc_data *data = mrq->data;
|
|
+ int err;
|
|
|
|
if (host->sg_count < 0) {
|
|
data->error = host->sg_count;
|
|
@@ -550,22 +563,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
|
|
return data->error;
|
|
}
|
|
|
|
- if (data->flags & MMC_DATA_READ)
|
|
- return sd_read_long_data(host, mrq);
|
|
+ if (data->flags & MMC_DATA_READ) {
|
|
+ if (host->initial_mode)
|
|
+ sd_disable_initial_mode(host);
|
|
|
|
- return sd_write_long_data(host, mrq);
|
|
-}
|
|
+ err = sd_read_long_data(host, mrq);
|
|
|
|
-static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
|
|
-{
|
|
- rtsx_pci_write_register(host->pcr, SD_CFG1,
|
|
- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
|
|
-}
|
|
+ if (host->initial_mode)
|
|
+ sd_enable_initial_mode(host);
|
|
|
|
-static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
|
|
-{
|
|
- rtsx_pci_write_register(host->pcr, SD_CFG1,
|
|
- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ return sd_write_long_data(host, mrq);
|
|
}
|
|
|
|
static void sd_normal_rw(struct realtek_pci_sdmmc *host,
|
|
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
|
|
index 3b8d456e857d5..fc38db64a6b48 100644
|
|
--- a/drivers/mmc/host/sdhci-of-arasan.c
|
|
+++ b/drivers/mmc/host/sdhci-of-arasan.c
|
|
@@ -159,6 +159,12 @@ struct sdhci_arasan_data {
|
|
/* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
|
|
* internal clock even when the clock isn't stable */
|
|
#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
|
|
+/*
|
|
+ * Some of the Arasan variations might not have timing requirements
|
|
+ * met at 25MHz for Default Speed mode, those controllers work at
|
|
+ * 19MHz instead
|
|
+ */
|
|
+#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
|
|
};
|
|
|
|
struct sdhci_arasan_of_data {
|
|
@@ -267,7 +273,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
|
|
* through low speeds without power cycling.
|
|
*/
|
|
sdhci_set_clock(host, host->max_clk);
|
|
- phy_power_on(sdhci_arasan->phy);
|
|
+ if (phy_power_on(sdhci_arasan->phy)) {
|
|
+ pr_err("%s: Cannot power on phy.\n",
|
|
+ mmc_hostname(host->mmc));
|
|
+ return;
|
|
+ }
|
|
+
|
|
sdhci_arasan->is_phy_on = true;
|
|
|
|
/*
|
|
@@ -290,6 +301,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
|
|
sdhci_arasan->is_phy_on = false;
|
|
}
|
|
|
|
+ if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) {
|
|
+ /*
|
|
+ * Some of the Arasan variations might not have timing
|
|
+		 * requirements met at 25MHz for Default Speed mode;
|
|
+ * those controllers work at 19MHz instead.
|
|
+ */
|
|
+ if (clock == DEFAULT_SPEED_MAX_DTR)
|
|
+ clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
|
|
+ }
|
|
+
|
|
/* Set the Input and Output Clock Phase Delays */
|
|
if (clk_data->set_clk_delays)
|
|
clk_data->set_clk_delays(host);
|
|
@@ -307,7 +328,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
|
|
msleep(20);
|
|
|
|
if (ctrl_phy) {
|
|
- phy_power_on(sdhci_arasan->phy);
|
|
+ if (phy_power_on(sdhci_arasan->phy)) {
|
|
+ pr_err("%s: Cannot power on phy.\n",
|
|
+ mmc_hostname(host->mmc));
|
|
+ return;
|
|
+ }
|
|
+
|
|
sdhci_arasan->is_phy_on = true;
|
|
}
|
|
}
|
|
@@ -463,7 +489,9 @@ static int sdhci_arasan_suspend(struct device *dev)
|
|
ret = phy_power_off(sdhci_arasan->phy);
|
|
if (ret) {
|
|
dev_err(dev, "Cannot power off phy.\n");
|
|
- sdhci_resume_host(host);
|
|
+ if (sdhci_resume_host(host))
|
|
+ dev_err(dev, "Cannot resume host.\n");
|
|
+
|
|
return ret;
|
|
}
|
|
sdhci_arasan->is_phy_on = false;
|
|
@@ -1598,6 +1626,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
|
|
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
|
|
host->mmc_host_ops.execute_tuning =
|
|
arasan_zynqmp_execute_tuning;
|
|
+
|
|
+ sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN;
|
|
}
|
|
|
|
arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
|
|
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
|
|
index 018af1e38eb9b..645c7cabcbe4d 100644
|
|
--- a/drivers/net/bonding/bond_main.c
|
|
+++ b/drivers/net/bonding/bond_main.c
|
|
@@ -2219,7 +2219,6 @@ static int __bond_release_one(struct net_device *bond_dev,
|
|
/* recompute stats just before removing the slave */
|
|
bond_get_stats(bond->dev, &bond->bond_stats);
|
|
|
|
- bond_upper_dev_unlink(bond, slave);
|
|
/* unregister rx_handler early so bond_handle_frame wouldn't be called
|
|
* for this slave anymore.
|
|
*/
|
|
@@ -2228,6 +2227,8 @@ static int __bond_release_one(struct net_device *bond_dev,
|
|
if (BOND_MODE(bond) == BOND_MODE_8023AD)
|
|
bond_3ad_unbind_slave(slave);
|
|
|
|
+ bond_upper_dev_unlink(bond, slave);
|
|
+
|
|
if (bond_mode_can_use_xmit_hash(bond))
|
|
bond_update_slave_arr(bond, slave);
|
|
|
|
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
|
|
index a455534740cdf..95e634cbc4b63 100644
|
|
--- a/drivers/net/dsa/lantiq_gswip.c
|
|
+++ b/drivers/net/dsa/lantiq_gswip.c
|
|
@@ -853,7 +853,8 @@ static int gswip_setup(struct dsa_switch *ds)
|
|
|
|
gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
|
|
GSWIP_MAC_CTRL_2p(cpu_port));
|
|
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
|
|
+ gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
|
|
+ GSWIP_MAC_FLEN);
|
|
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
|
|
GSWIP_BM_QUEUE_GCTRL);
|
|
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
|
|
index 61f6f0287cbe1..ff9d84a7147f1 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
|
|
@@ -10,7 +10,14 @@
|
|
|
|
static u16 hclge_errno_to_resp(int errno)
|
|
{
|
|
- return abs(errno);
|
|
+ int resp = abs(errno);
|
|
+
|
|
+	/* The status for pf to vf msg cmd is u16, constrained by HW.
|
|
+	 * We need to keep the same type as it.
|
|
+	 * The input errno is the standard error code, so it is safe to
|
|
+ * use a u16 to store the abs(errno).
|
|
+ */
|
|
+ return (u16)resp;
|
|
}
|
|
|
|
/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
|
|
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
|
|
index 7023aa147043f..f06c079e812ec 100644
|
|
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
|
|
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
|
|
@@ -131,6 +131,30 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
|
|
return 0;
|
|
}
|
|
|
|
+/**
|
|
+ * iavf_lock_timeout - try to set bit but give up after timeout
|
|
+ * @adapter: board private structure
|
|
+ * @bit: bit to set
|
|
+ * @msecs: timeout in msecs
|
|
+ *
|
|
+ * Returns 0 on success, negative on failure
|
|
+ **/
|
|
+static int iavf_lock_timeout(struct iavf_adapter *adapter,
|
|
+ enum iavf_critical_section_t bit,
|
|
+ unsigned int msecs)
|
|
+{
|
|
+ unsigned int wait, delay = 10;
|
|
+
|
|
+ for (wait = 0; wait < msecs; wait += delay) {
|
|
+ if (!test_and_set_bit(bit, &adapter->crit_section))
|
|
+ return 0;
|
|
+
|
|
+ msleep(delay);
|
|
+ }
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
/**
|
|
* iavf_schedule_reset - Set the flags and schedule a reset event
|
|
* @adapter: board private structure
|
|
@@ -1951,7 +1975,6 @@ static void iavf_watchdog_task(struct work_struct *work)
|
|
/* check for hw reset */
|
|
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
|
|
if (!reg_val) {
|
|
- adapter->state = __IAVF_RESETTING;
|
|
adapter->flags |= IAVF_FLAG_RESET_PENDING;
|
|
adapter->aq_required = 0;
|
|
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
|
|
@@ -2065,6 +2088,10 @@ static void iavf_reset_task(struct work_struct *work)
|
|
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
|
|
return;
|
|
|
|
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) {
|
|
+ schedule_work(&adapter->reset_task);
|
|
+ return;
|
|
+ }
|
|
while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
|
|
&adapter->crit_section))
|
|
usleep_range(500, 1000);
|
|
@@ -2279,6 +2306,8 @@ static void iavf_adminq_task(struct work_struct *work)
|
|
if (!event.msg_buf)
|
|
goto out;
|
|
|
|
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200))
|
|
+ goto freedom;
|
|
do {
|
|
ret = iavf_clean_arq_element(hw, &event, &pending);
|
|
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
|
|
@@ -2292,6 +2321,7 @@ static void iavf_adminq_task(struct work_struct *work)
|
|
if (pending != 0)
|
|
memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
|
|
} while (pending);
|
|
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
|
|
|
|
if ((adapter->flags &
|
|
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
|
|
@@ -3594,6 +3624,10 @@ static void iavf_init_task(struct work_struct *work)
|
|
init_task.work);
|
|
struct iavf_hw *hw = &adapter->hw;
|
|
|
|
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) {
|
|
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
|
|
+ return;
|
|
+ }
|
|
switch (adapter->state) {
|
|
case __IAVF_STARTUP:
|
|
if (iavf_startup(adapter) < 0)
|
|
@@ -3606,14 +3640,14 @@ static void iavf_init_task(struct work_struct *work)
|
|
case __IAVF_INIT_GET_RESOURCES:
|
|
if (iavf_init_get_resources(adapter) < 0)
|
|
goto init_failed;
|
|
- return;
|
|
+ goto out;
|
|
default:
|
|
goto init_failed;
|
|
}
|
|
|
|
queue_delayed_work(iavf_wq, &adapter->init_task,
|
|
msecs_to_jiffies(30));
|
|
- return;
|
|
+ goto out;
|
|
init_failed:
|
|
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
|
|
dev_err(&adapter->pdev->dev,
|
|
@@ -3622,9 +3656,11 @@ init_failed:
|
|
iavf_shutdown_adminq(hw);
|
|
adapter->state = __IAVF_STARTUP;
|
|
queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
|
|
- return;
|
|
+ goto out;
|
|
}
|
|
queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
|
|
+out:
|
|
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
|
|
}
|
|
|
|
/**
|
|
@@ -3641,9 +3677,12 @@ static void iavf_shutdown(struct pci_dev *pdev)
|
|
if (netif_running(netdev))
|
|
iavf_close(netdev);
|
|
|
|
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
|
|
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
|
|
/* Prevent the watchdog from running. */
|
|
adapter->state = __IAVF_REMOVE;
|
|
adapter->aq_required = 0;
|
|
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
|
|
|
|
#ifdef CONFIG_PM
|
|
pci_save_state(pdev);
|
|
@@ -3871,10 +3910,6 @@ static void iavf_remove(struct pci_dev *pdev)
|
|
err);
|
|
}
|
|
|
|
- /* Shut down all the garbage mashers on the detention level */
|
|
- adapter->state = __IAVF_REMOVE;
|
|
- adapter->aq_required = 0;
|
|
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
|
|
iavf_request_reset(adapter);
|
|
msleep(50);
|
|
/* If the FW isn't responding, kick it once, but only once. */
|
|
@@ -3882,6 +3917,13 @@ static void iavf_remove(struct pci_dev *pdev)
|
|
iavf_request_reset(adapter);
|
|
msleep(50);
|
|
}
|
|
+ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
|
|
+ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
|
|
+
|
|
+ /* Shut down all the garbage mashers on the detention level */
|
|
+ adapter->state = __IAVF_REMOVE;
|
|
+ adapter->aq_required = 0;
|
|
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
|
|
iavf_free_all_tx_resources(adapter);
|
|
iavf_free_all_rx_resources(adapter);
|
|
iavf_misc_irq_disable(adapter);
|
|
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
|
|
index 013dd29553814..cae090a072524 100644
|
|
--- a/drivers/net/ethernet/intel/igc/igc_main.c
|
|
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
|
|
@@ -4083,6 +4083,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data)
|
|
*/
|
|
static int igc_request_msix(struct igc_adapter *adapter)
|
|
{
|
|
+ unsigned int num_q_vectors = adapter->num_q_vectors;
|
|
int i = 0, err = 0, vector = 0, free_vector = 0;
|
|
struct net_device *netdev = adapter->netdev;
|
|
|
|
@@ -4091,7 +4092,13 @@ static int igc_request_msix(struct igc_adapter *adapter)
|
|
if (err)
|
|
goto err_out;
|
|
|
|
- for (i = 0; i < adapter->num_q_vectors; i++) {
|
|
+ if (num_q_vectors > MAX_Q_VECTORS) {
|
|
+ num_q_vectors = MAX_Q_VECTORS;
|
|
+ dev_warn(&adapter->pdev->dev,
|
|
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
|
|
+ adapter->num_q_vectors, MAX_Q_VECTORS);
|
|
+ }
|
|
+ for (i = 0; i < num_q_vectors; i++) {
|
|
struct igc_q_vector *q_vector = adapter->q_vector[i];
|
|
|
|
vector++;
|
|
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
|
|
index df238e46e2aeb..b062ed06235d2 100644
|
|
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
|
|
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
|
|
@@ -1129,7 +1129,22 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
|
|
/* Enable backpressure for RQ aura */
|
|
if (aura_id < pfvf->hw.rqpool_cnt) {
|
|
aq->aura.bp_ena = 0;
|
|
+ /* If NIX1 LF is attached then specify NIX1_RX.
|
|
+ *
|
|
+ * Below NPA_AURA_S[BP_ENA] is set according to the
|
|
+ * NPA_BPINTF_E enumeration given as:
|
|
+ * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
|
|
+ * NIX0_RX is 0x0 + 0*0x1 = 0
|
|
+ * NIX1_RX is 0x0 + 1*0x1 = 1
|
|
+ * But in HRM it is given that
|
|
+ * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
|
|
+ * NIX-RX based on [BP] level. One bit per NIX-RX; index
|
|
+ * enumerated by NPA_BPINTF_E."
|
|
+ */
|
|
+ if (pfvf->nix_blkaddr == BLKADDR_NIX1)
|
|
+ aq->aura.bp_ena = 1;
|
|
aq->aura.nix0_bpid = pfvf->bpid[0];
|
|
+
|
|
/* Set backpressure level for RQ's Aura */
|
|
aq->aura.bp = RQ_BP_LVL_AURA;
|
|
}
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
|
|
index e49387dbef987..2e55e00888715 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
|
|
@@ -865,7 +865,7 @@ static void cb_timeout_handler(struct work_struct *work)
|
|
ent->ret = -ETIMEDOUT;
|
|
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
|
|
ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
|
|
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
|
|
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
|
|
|
|
out:
|
|
cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
|
|
@@ -982,7 +982,7 @@ static void cmd_work_handler(struct work_struct *work)
|
|
MLX5_SET(mbox_out, ent->out, status, status);
|
|
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
|
|
|
|
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
|
|
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
|
|
return;
|
|
}
|
|
|
|
@@ -996,7 +996,7 @@ static void cmd_work_handler(struct work_struct *work)
|
|
poll_timeout(ent);
|
|
/* make sure we read the descriptor after ownership is SW */
|
|
rmb();
|
|
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
|
|
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
|
|
}
|
|
}
|
|
|
|
@@ -1056,7 +1056,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
|
|
mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
|
|
|
|
ent->ret = -ETIMEDOUT;
|
|
- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
|
|
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
|
|
}
|
|
|
|
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
|
|
index b3c9dc032026c..478de5ded7c21 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
|
|
@@ -824,9 +824,9 @@ again:
|
|
new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
|
|
ste_location, send_ste_list);
|
|
if (!new_htbl) {
|
|
- mlx5dr_htbl_put(cur_htbl);
|
|
mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
|
|
cur_htbl->chunk_size);
|
|
+ mlx5dr_htbl_put(cur_htbl);
|
|
} else {
|
|
cur_htbl = new_htbl;
|
|
}
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
|
|
index ea3c6cf27db42..eb6677f737a0f 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
|
|
@@ -605,6 +605,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
|
|
|
|
MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
|
|
MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
|
|
+ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
|
|
|
|
MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
|
|
MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
|
|
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
|
|
index 437226866ce81..dfc1f32cda2b3 100644
|
|
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
|
|
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
|
|
@@ -1697,7 +1697,7 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
|
|
case NFP_NET_META_RESYNC_INFO:
|
|
if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
|
|
pkt_len))
|
|
- return NULL;
|
|
+ return false;
|
|
data += sizeof(struct nfp_net_tls_resync_req);
|
|
break;
|
|
default:
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
|
|
index 749585fe6fc96..90f69f43770a4 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
|
|
@@ -289,10 +289,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
|
|
val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
|
|
break;
|
|
default:
|
|
- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
|
|
- phy_modes(gmac->phy_mode));
|
|
- err = -EINVAL;
|
|
- goto err_remove_config_dt;
|
|
+ goto err_unsupported_phy;
|
|
}
|
|
regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
|
|
|
|
@@ -309,10 +306,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
|
|
NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
|
|
break;
|
|
default:
|
|
- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
|
|
- phy_modes(gmac->phy_mode));
|
|
- err = -EINVAL;
|
|
- goto err_remove_config_dt;
|
|
+ goto err_unsupported_phy;
|
|
}
|
|
regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
|
|
|
|
@@ -329,8 +323,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
|
|
NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
|
|
break;
|
|
default:
|
|
- /* We don't get here; the switch above will have errored out */
|
|
- unreachable();
|
|
+ goto err_unsupported_phy;
|
|
}
|
|
regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
|
|
|
|
@@ -361,6 +354,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
|
|
|
|
return 0;
|
|
|
|
+err_unsupported_phy:
|
|
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
|
|
+ phy_modes(gmac->phy_mode));
|
|
+ err = -EINVAL;
|
|
+
|
|
err_remove_config_dt:
|
|
stmmac_remove_config_dt(pdev, plat_dat);
|
|
|
|
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
|
|
index c0d181a7f83ae..0b7135a3c585a 100644
|
|
--- a/drivers/net/ethernet/wiznet/w5100.c
|
|
+++ b/drivers/net/ethernet/wiznet/w5100.c
|
|
@@ -1052,6 +1052,8 @@ static int w5100_mmio_probe(struct platform_device *pdev)
|
|
mac_addr = data->mac_addr;
|
|
|
|
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+ if (!mem)
|
|
+ return -EINVAL;
|
|
if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
|
|
ops = &w5100_mmio_indirect_ops;
|
|
else
|
|
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
|
|
index a9b058bb1be87..7bf43031cea8c 100644
|
|
--- a/drivers/net/phy/dp83822.c
|
|
+++ b/drivers/net/phy/dp83822.c
|
|
@@ -305,11 +305,9 @@ static int dp83822_config_intr(struct phy_device *phydev)
|
|
|
|
static int dp8382x_disable_wol(struct phy_device *phydev)
|
|
{
|
|
- int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
|
|
- DP83822_WOL_SECURE_ON;
|
|
-
|
|
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
|
|
- MII_DP83822_WOL_CFG, value);
|
|
+ return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
|
|
+ DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
|
|
+ DP83822_WOL_SECURE_ON);
|
|
}
|
|
|
|
static int dp83822_read_status(struct phy_device *phydev)
|
|
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
|
|
index b4885a700296e..b0a4ca3559fd8 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
|
|
@@ -3351,7 +3351,8 @@ found:
|
|
"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
|
|
cptr, code, reference, length, major, minor);
|
|
if ((!AR_SREV_9485(ah) && length >= 1024) ||
|
|
- (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
|
|
+ (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) ||
|
|
+ (length > cptr)) {
|
|
ath_dbg(common, EEPROM, "Skipping bad header\n");
|
|
cptr -= COMP_HDR_LEN;
|
|
continue;
|
|
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
|
|
index c86faebbc4594..6b2668f065d54 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/hw.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/hw.c
|
|
@@ -1622,7 +1622,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
|
|
ath9k_hw_gpio_request_out(ah, i, NULL,
|
|
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
|
|
ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
|
|
- ath9k_hw_gpio_free(ah, i);
|
|
}
|
|
}
|
|
|
|
@@ -2730,14 +2729,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
|
|
static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
|
|
const char *label)
|
|
{
|
|
+ int err;
|
|
+
|
|
if (ah->caps.gpio_requested & BIT(gpio))
|
|
return;
|
|
|
|
- /* may be requested by BSP, free anyway */
|
|
- gpio_free(gpio);
|
|
-
|
|
- if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
|
|
+ err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
|
|
+ if (err) {
|
|
+ ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
|
|
+ gpio, err);
|
|
return;
|
|
+ }
|
|
|
|
ah->caps.gpio_requested |= BIT(gpio);
|
|
}
|
|
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
|
|
index 9f8e44210e89a..6bed619535427 100644
|
|
--- a/drivers/net/wireless/ath/wcn36xx/main.c
|
|
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
|
|
@@ -405,13 +405,14 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
|
|
wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
|
|
ch);
|
|
|
|
- if (wcn->sw_scan_opchannel == ch) {
|
|
+ if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) {
|
|
/* If channel is the initial operating channel, we may
|
|
* want to receive/transmit regular data packets, then
|
|
* simply stop the scan session and exit PS mode.
|
|
*/
|
|
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
|
|
wcn->sw_scan_vif);
|
|
+ wcn->sw_scan_channel = 0;
|
|
} else if (wcn->sw_scan) {
|
|
/* A scan is ongoing, do not change the operating
|
|
* channel, but start a scan session on the channel.
|
|
@@ -419,6 +420,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
|
|
wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
|
|
wcn->sw_scan_vif);
|
|
wcn36xx_smd_start_scan(wcn, ch);
|
|
+ wcn->sw_scan_channel = ch;
|
|
} else {
|
|
wcn36xx_change_opchannel(wcn, ch);
|
|
}
|
|
@@ -699,6 +701,7 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
|
|
|
|
wcn->sw_scan = true;
|
|
wcn->sw_scan_vif = vif;
|
|
+ wcn->sw_scan_channel = 0;
|
|
if (vif_priv->sta_assoc)
|
|
wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn);
|
|
else
|
|
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
|
|
index 1b831157ede17..cab196bb38cd4 100644
|
|
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
|
|
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
|
|
@@ -287,6 +287,10 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
|
|
status.rate_idx = 0;
|
|
}
|
|
|
|
+ if (ieee80211_is_beacon(hdr->frame_control) ||
|
|
+ ieee80211_is_probe_resp(hdr->frame_control))
|
|
+ status.boottime_ns = ktime_get_boottime_ns();
|
|
+
|
|
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
|
|
|
|
if (ieee80211_is_beacon(hdr->frame_control)) {
|
|
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
|
|
index 71fa9992b118c..d0fcce86903ae 100644
|
|
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
|
|
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
|
|
@@ -232,6 +232,7 @@ struct wcn36xx {
|
|
struct cfg80211_scan_request *scan_req;
|
|
bool sw_scan;
|
|
u8 sw_scan_opchannel;
|
|
+ u8 sw_scan_channel;
|
|
struct ieee80211_vif *sw_scan_vif;
|
|
struct mutex scan_lock;
|
|
bool scan_aborted;
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
|
|
index ab4a8b942c81d..419eaa5cf0b50 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
|
|
@@ -2303,7 +2303,7 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|
return;
|
|
|
|
if (dump_data->monitor_only)
|
|
- dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
|
|
+ dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
|
|
|
|
fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
|
|
file_len = le32_to_cpu(dump_file->file_len);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
|
|
index 5243b84e653cf..6a8bf9bb9c455 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
|
|
@@ -1044,8 +1044,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
|
|
return -ENOMEM;
|
|
|
|
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
|
- if (mvm->beacon_inject_active)
|
|
+ if (mvm->beacon_inject_active) {
|
|
+ dev_kfree_skb(beacon);
|
|
return -EBUSY;
|
|
+ }
|
|
#endif
|
|
|
|
ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
index 9caff70cbd276..6f301ac8cce20 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
|
|
@@ -3029,16 +3029,20 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
|
|
void *_data)
|
|
{
|
|
struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data;
|
|
+ const struct cfg80211_bss_ies *ies;
|
|
const struct element *elem;
|
|
|
|
- elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data,
|
|
- bss->ies->len);
|
|
+ rcu_read_lock();
|
|
+ ies = rcu_dereference(bss->ies);
|
|
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
|
|
+ ies->len);
|
|
|
|
if (!elem || elem->datalen < 10 ||
|
|
!(elem->data[10] &
|
|
WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
|
|
data->tolerated = false;
|
|
}
|
|
+ rcu_read_unlock();
|
|
}
|
|
|
|
static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
index cb83490f1016f..0be8ff30b13e6 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
|
|
@@ -678,10 +678,26 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
|
|
|
mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
|
|
|
|
- mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
|
|
- mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
|
|
- mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
|
|
- mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
|
|
+ if (iwl_mvm_has_new_tx_api(mvm)) {
|
|
+ /*
|
|
+		 * If we have the new TX/queue allocation API, initialize them
|
|
+ * all to invalid numbers. We'll rewrite the ones that we need
|
|
+ * later, but that doesn't happen for all of them all of the
|
|
+ * time (e.g. P2P Device is optional), and if a dynamic queue
|
|
+ * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
|
|
+ * iwl_mvm_is_static_queue() erroneously returns true, and we
|
|
+ * might have things getting stuck.
|
|
+ */
|
|
+ mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
|
|
+ mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
|
|
+ mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
|
|
+ mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
|
|
+ } else {
|
|
+ mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
|
|
+ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
|
|
+ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
|
|
+ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
|
|
+ }
|
|
|
|
mvm->sf_state = SF_UNINIT;
|
|
if (iwl_mvm_has_unified_ucode(mvm))
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
|
|
index aebaad45043fa..a5d90e028833c 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
|
|
@@ -1682,7 +1682,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
|
|
struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
|
|
u32 n_aps_flag =
|
|
iwl_mvm_scan_ch_n_aps_flag(vif_type,
|
|
- cfg->v2.channel_num);
|
|
+ channels[i]->hw_value);
|
|
|
|
cfg->flags = cpu_to_le32(flags | n_aps_flag);
|
|
cfg->v2.channel_num = channels[i]->hw_value;
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
|
|
index a66a5c19474a9..ef62839894c77 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
|
|
@@ -362,8 +362,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
|
|
}
|
|
|
|
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|
- int queue, u8 tid, u8 flags)
|
|
+ u16 *queueptr, u8 tid, u8 flags)
|
|
{
|
|
+ int queue = *queueptr;
|
|
struct iwl_scd_txq_cfg_cmd cmd = {
|
|
.scd_queue = queue,
|
|
.action = SCD_CFG_DISABLE_QUEUE,
|
|
@@ -372,6 +373,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
|
|
|
if (iwl_mvm_has_new_tx_api(mvm)) {
|
|
iwl_trans_txq_free(mvm->trans, queue);
|
|
+ *queueptr = IWL_MVM_INVALID_QUEUE;
|
|
|
|
return 0;
|
|
}
|
|
@@ -533,6 +535,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
|
|
u8 sta_id, tid;
|
|
unsigned long disable_agg_tids = 0;
|
|
bool same_sta;
|
|
+ u16 queue_tmp = queue;
|
|
int ret;
|
|
|
|
lockdep_assert_held(&mvm->mutex);
|
|
@@ -555,7 +558,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
|
|
iwl_mvm_invalidate_sta_queue(mvm, queue,
|
|
disable_agg_tids, false);
|
|
|
|
- ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
|
|
+ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
|
|
if (ret) {
|
|
IWL_ERR(mvm,
|
|
"Failed to free inactive queue %d (ret=%d)\n",
|
|
@@ -1230,6 +1233,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|
unsigned int wdg_timeout =
|
|
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
|
|
int queue = -1;
|
|
+ u16 queue_tmp;
|
|
unsigned long disable_agg_tids = 0;
|
|
enum iwl_mvm_agg_state queue_state;
|
|
bool shared_queue = false, inc_ssn;
|
|
@@ -1378,7 +1382,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|
return 0;
|
|
|
|
out_err:
|
|
- iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
|
|
+ queue_tmp = queue;
|
|
+ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
|
|
|
|
return ret;
|
|
}
|
|
@@ -1825,7 +1830,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
|
|
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
|
|
continue;
|
|
|
|
- iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
|
|
+ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
|
|
0);
|
|
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
|
|
}
|
|
@@ -2033,7 +2038,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
|
|
ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
|
|
if (ret) {
|
|
if (!iwl_mvm_has_new_tx_api(mvm))
|
|
- iwl_mvm_disable_txq(mvm, NULL, *queue,
|
|
+ iwl_mvm_disable_txq(mvm, NULL, queue,
|
|
IWL_MAX_TID_COUNT, 0);
|
|
return ret;
|
|
}
|
|
@@ -2106,7 +2111,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|
if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
|
|
return -EINVAL;
|
|
|
|
- iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
|
|
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
|
|
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
|
|
if (ret)
|
|
IWL_WARN(mvm, "Failed sending remove station\n");
|
|
@@ -2123,7 +2128,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
|
|
if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
|
|
return -EINVAL;
|
|
|
|
- iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
|
|
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
|
|
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
|
|
if (ret)
|
|
IWL_WARN(mvm, "Failed sending remove station\n");
|
|
@@ -2219,7 +2224,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
|
|
struct ieee80211_vif *vif)
|
|
{
|
|
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
|
- int queue;
|
|
+ u16 *queueptr, queue;
|
|
|
|
lockdep_assert_held(&mvm->mutex);
|
|
|
|
@@ -2228,10 +2233,10 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
|
|
switch (vif->type) {
|
|
case NL80211_IFTYPE_AP:
|
|
case NL80211_IFTYPE_ADHOC:
|
|
- queue = mvm->probe_queue;
|
|
+ queueptr = &mvm->probe_queue;
|
|
break;
|
|
case NL80211_IFTYPE_P2P_DEVICE:
|
|
- queue = mvm->p2p_dev_queue;
|
|
+ queueptr = &mvm->p2p_dev_queue;
|
|
break;
|
|
default:
|
|
WARN(1, "Can't free bcast queue on vif type %d\n",
|
|
@@ -2239,7 +2244,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
|
|
return;
|
|
}
|
|
|
|
- iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
|
|
+ queue = *queueptr;
|
|
+ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
|
|
if (iwl_mvm_has_new_tx_api(mvm))
|
|
return;
|
|
|
|
@@ -2474,7 +2480,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|
|
|
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
|
|
|
|
- iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
|
|
+ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
|
|
|
|
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
|
|
if (ret)
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
|
|
index 94299f259518d..2c13fa8f28200 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
|
|
@@ -544,6 +544,9 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
|
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
|
int i;
|
|
|
|
+ if (!trans_pcie->rx_pool)
|
|
+ return;
|
|
+
|
|
for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
|
|
if (!trans_pcie->rx_pool[i].page)
|
|
continue;
|
|
@@ -1094,7 +1097,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
|
|
INIT_LIST_HEAD(&rba->rbd_empty);
|
|
spin_unlock(&rba->lock);
|
|
|
|
- /* free all first - we might be reconfigured for a different size */
|
|
+ /* free all first - we overwrite everything here */
|
|
iwl_pcie_free_rbs_pool(trans);
|
|
|
|
for (i = 0; i < RX_QUEUE_SIZE; i++)
|
|
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
|
|
index bb990be7c870b..082768ec8aa80 100644
|
|
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
|
|
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
|
|
@@ -1909,6 +1909,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
|
|
{
|
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
|
|
|
+ /* free all first - we might be reconfigured for a different size */
|
|
+ iwl_pcie_free_rbs_pool(trans);
|
|
+
|
|
trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
|
|
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
|
|
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
|
|
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
|
|
index acb6b0cd36672..b28fa0c4d180c 100644
|
|
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
|
|
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
|
|
@@ -1378,6 +1378,8 @@ struct rtl8xxxu_priv {
|
|
u8 no_pape:1;
|
|
u8 int_buf[USB_INTR_CONTENT_LENGTH];
|
|
u8 rssi_level;
|
|
+ DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS);
|
|
+ DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS);
|
|
/*
|
|
* Only one virtual interface permitted because only STA mode
|
|
* is supported and no iface_combinations are provided.
|
|
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
index 5cd7ef3625c5e..0d374a2948406 100644
|
|
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
|
|
@@ -4805,6 +4805,8 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
|
|
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
|
|
struct rtl8xxxu_priv *priv = hw->priv;
|
|
struct device *dev = &priv->udev->dev;
|
|
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
|
|
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
|
|
u32 rate;
|
|
u16 rate_flags = tx_info->control.rates[0].flags;
|
|
u16 seq_number;
|
|
@@ -4828,7 +4830,7 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
|
|
|
|
tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
|
|
|
|
- if (ampdu_enable)
|
|
+ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
|
|
tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
|
|
else
|
|
tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
|
|
@@ -4876,6 +4878,8 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
|
|
struct rtl8xxxu_priv *priv = hw->priv;
|
|
struct device *dev = &priv->udev->dev;
|
|
struct rtl8xxxu_txdesc40 *tx_desc40;
|
|
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
|
|
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
|
|
u32 rate;
|
|
u16 rate_flags = tx_info->control.rates[0].flags;
|
|
u16 seq_number;
|
|
@@ -4902,7 +4906,7 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
|
|
|
|
tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
|
|
|
|
- if (ampdu_enable)
|
|
+ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
|
|
tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
|
|
else
|
|
tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
|
|
@@ -5015,12 +5019,19 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
|
|
if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
|
|
if (sta->ht_cap.ht_supported) {
|
|
u32 ampdu, val32;
|
|
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
|
|
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
|
|
|
|
ampdu = (u32)sta->ht_cap.ampdu_density;
|
|
val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
|
|
tx_desc->txdw2 |= cpu_to_le32(val32);
|
|
|
|
ampdu_enable = true;
|
|
+
|
|
+ if (!test_bit(tid, priv->tx_aggr_started) &&
|
|
+ !(skb->protocol == cpu_to_be16(ETH_P_PAE)))
|
|
+ if (!ieee80211_start_tx_ba_session(sta, tid, 0))
|
|
+ set_bit(tid, priv->tx_aggr_started);
|
|
}
|
|
}
|
|
|
|
@@ -6095,6 +6106,7 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|
struct device *dev = &priv->udev->dev;
|
|
u8 ampdu_factor, ampdu_density;
|
|
struct ieee80211_sta *sta = params->sta;
|
|
+ u16 tid = params->tid;
|
|
enum ieee80211_ampdu_mlme_action action = params->action;
|
|
|
|
switch (action) {
|
|
@@ -6107,17 +6119,20 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|
dev_dbg(dev,
|
|
"Changed HT: ampdu_factor %02x, ampdu_density %02x\n",
|
|
ampdu_factor, ampdu_density);
|
|
- break;
|
|
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
|
|
+ case IEEE80211_AMPDU_TX_STOP_CONT:
|
|
case IEEE80211_AMPDU_TX_STOP_FLUSH:
|
|
- dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
|
|
- rtl8xxxu_set_ampdu_factor(priv, 0);
|
|
- rtl8xxxu_set_ampdu_min_space(priv, 0);
|
|
- break;
|
|
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
|
|
- dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
|
|
- __func__);
|
|
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP\n", __func__);
|
|
rtl8xxxu_set_ampdu_factor(priv, 0);
|
|
rtl8xxxu_set_ampdu_min_space(priv, 0);
|
|
+ clear_bit(tid, priv->tx_aggr_started);
|
|
+ clear_bit(tid, priv->tid_tx_operational);
|
|
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
|
+ break;
|
|
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
|
|
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_OPERATIONAL\n", __func__);
|
|
+ set_bit(tid, priv->tid_tx_operational);
|
|
break;
|
|
case IEEE80211_AMPDU_RX_START:
|
|
dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
|
|
index c0e4b111c8b4e..73d6807a8cdfb 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/Makefile
|
|
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
|
|
@@ -15,9 +15,9 @@ rtw88_core-y += main.o \
|
|
ps.o \
|
|
sec.o \
|
|
bf.o \
|
|
- wow.o \
|
|
regd.o
|
|
|
|
+rtw88_core-$(CONFIG_PM) += wow.o
|
|
|
|
obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o
|
|
rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
|
|
index b2fd87834f23d..0452630bcfacc 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/fw.c
|
|
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
|
|
@@ -684,7 +684,7 @@ static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
|
|
continue;
|
|
if ((!ssid && !rsvd_pkt->ssid) ||
|
|
rtw_ssid_equal(rsvd_pkt->ssid, ssid))
|
|
- size = rsvd_pkt->skb->len;
|
|
+ size = rsvd_pkt->probe_req_size;
|
|
}
|
|
|
|
return size;
|
|
@@ -912,6 +912,8 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
|
|
ssid->ssid_len, 0);
|
|
else
|
|
skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
|
|
+ if (skb_new)
|
|
+ rsvd_pkt->probe_req_size = (u16)skb_new->len;
|
|
break;
|
|
case RSVD_NLO_INFO:
|
|
skb_new = rtw_nlo_info_get(hw);
|
|
@@ -1508,6 +1510,7 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
|
|
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
|
|
u8 location)
|
|
{
|
|
+ struct rtw_chip_info *chip = rtwdev->chip;
|
|
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
|
|
u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
|
|
|
|
@@ -1518,6 +1521,7 @@ static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
|
|
UPDATE_PKT_SET_LOCATION(h2c_pkt, location);
|
|
|
|
/* include txdesc size */
|
|
+ size += chip->tx_pkt_desc_sz;
|
|
UPDATE_PKT_SET_SIZE(h2c_pkt, size);
|
|
|
|
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
|
|
@@ -1527,7 +1531,7 @@ void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
|
|
struct cfg80211_ssid *ssid)
|
|
{
|
|
u8 loc;
|
|
- u32 size;
|
|
+ u16 size;
|
|
|
|
loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
|
|
if (!loc) {
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
|
|
index 08644540d2595..f4aed247e3bdb 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/fw.h
|
|
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
|
|
@@ -117,6 +117,7 @@ struct rtw_rsvd_page {
|
|
u8 page;
|
|
bool add_txdesc;
|
|
struct cfg80211_ssid *ssid;
|
|
+ u16 probe_req_size;
|
|
};
|
|
|
|
enum rtw_keep_alive_pkt_type {
|
|
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
|
|
index 2fcdf70a3a77e..bb2fd4e544f00 100644
|
|
--- a/drivers/net/wireless/realtek/rtw88/wow.c
|
|
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
|
|
@@ -283,15 +283,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
|
|
|
|
static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
|
|
{
|
|
- /* wait 100ms for wow firmware to finish work */
|
|
- msleep(100);
|
|
+ int ret;
|
|
+ u8 check;
|
|
+ u32 check_dis;
|
|
|
|
if (wow_enable) {
|
|
- if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
|
|
+ ret = read_poll_timeout(rtw_read8, check, !check, 1000,
|
|
+ 100000, true, rtwdev,
|
|
+ REG_WOWLAN_WAKE_REASON);
|
|
+ if (ret)
|
|
goto wow_fail;
|
|
} else {
|
|
- if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
|
|
- rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
|
|
+ ret = read_poll_timeout(rtw_read32_mask, check_dis,
|
|
+ !check_dis, 1000, 100000, true, rtwdev,
|
|
+ REG_FE1IMR, BIT_FS_RXDONE);
|
|
+ if (ret)
|
|
+ goto wow_fail;
|
|
+ ret = read_poll_timeout(rtw_read32_mask, check_dis,
|
|
+ !check_dis, 1000, 100000, false, rtwdev,
|
|
+ REG_RXPKT_NUM, BIT_RW_RELEASE);
|
|
+ if (ret)
|
|
goto wow_fail;
|
|
}
|
|
|
|
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
|
|
index 875076b0ea6c1..d5dd79b59b16c 100644
|
|
--- a/drivers/nvdimm/pmem.c
|
|
+++ b/drivers/nvdimm/pmem.c
|
|
@@ -448,11 +448,11 @@ static int pmem_attach_disk(struct device *dev,
|
|
pmem->pfn_flags |= PFN_MAP;
|
|
bb_range = pmem->pgmap.range;
|
|
} else {
|
|
+ addr = devm_memremap(dev, pmem->phys_addr,
|
|
+ pmem->size, ARCH_MEMREMAP_PMEM);
|
|
if (devm_add_action_or_reset(dev, pmem_release_queue,
|
|
&pmem->pgmap))
|
|
return -ENOMEM;
|
|
- addr = devm_memremap(dev, pmem->phys_addr,
|
|
- pmem->size, ARCH_MEMREMAP_PMEM);
|
|
bb_range.start = res->start;
|
|
bb_range.end = res->end;
|
|
}
|
|
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
|
|
index ff5a16b17133d..5a9b2f1b1418a 100644
|
|
--- a/drivers/nvme/host/core.c
|
|
+++ b/drivers/nvme/host/core.c
|
|
@@ -878,7 +878,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
|
|
return BLK_STS_IOERR;
|
|
}
|
|
|
|
- cmd->common.command_id = req->tag;
|
|
+ nvme_req(req)->genctr++;
|
|
+ cmd->common.command_id = nvme_cid(req);
|
|
trace_nvme_setup_cmd(req, cmd);
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
|
|
index 3cb3c82061d7e..8c735c55c15bf 100644
|
|
--- a/drivers/nvme/host/nvme.h
|
|
+++ b/drivers/nvme/host/nvme.h
|
|
@@ -153,6 +153,7 @@ enum nvme_quirks {
|
|
struct nvme_request {
|
|
struct nvme_command *cmd;
|
|
union nvme_result result;
|
|
+ u8 genctr;
|
|
u8 retries;
|
|
u8 flags;
|
|
u16 status;
|
|
@@ -469,6 +470,49 @@ struct nvme_ctrl_ops {
|
|
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
|
|
};
|
|
|
|
+/*
|
|
+ * nvme command_id is constructed as such:
|
|
+ * | xxxx | xxxxxxxxxxxx |
|
|
+ * gen request tag
|
|
+ */
|
|
+#define nvme_genctr_mask(gen) (gen & 0xf)
|
|
+#define nvme_cid_install_genctr(gen) (nvme_genctr_mask(gen) << 12)
|
|
+#define nvme_genctr_from_cid(cid) ((cid & 0xf000) >> 12)
|
|
+#define nvme_tag_from_cid(cid) (cid & 0xfff)
|
|
+
|
|
+static inline u16 nvme_cid(struct request *rq)
|
|
+{
|
|
+ return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
|
|
+}
|
|
+
|
|
+static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
|
|
+ u16 command_id)
|
|
+{
|
|
+ u8 genctr = nvme_genctr_from_cid(command_id);
|
|
+ u16 tag = nvme_tag_from_cid(command_id);
|
|
+ struct request *rq;
|
|
+
|
|
+ rq = blk_mq_tag_to_rq(tags, tag);
|
|
+ if (unlikely(!rq)) {
|
|
+ pr_err("could not locate request for tag %#x\n",
|
|
+ tag);
|
|
+ return NULL;
|
|
+ }
|
|
+ if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
|
|
+ dev_err(nvme_req(rq)->ctrl->device,
|
|
+ "request %#x genctr mismatch (got %#x expected %#x)\n",
|
|
+ tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
|
|
+ return NULL;
|
|
+ }
|
|
+ return rq;
|
|
+}
|
|
+
|
|
+static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
|
|
+ u16 command_id)
|
|
+{
|
|
+ return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
|
|
+}
|
|
+
|
|
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
|
|
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
|
|
const char *dev_name);
|
|
@@ -566,7 +610,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
|
|
|
|
static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
|
|
{
|
|
- return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
|
|
+ return !qid &&
|
|
+ nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
|
|
}
|
|
|
|
void nvme_complete_rq(struct request *req);
|
|
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
|
|
index fb48a88d1acb5..09767a805492c 100644
|
|
--- a/drivers/nvme/host/pci.c
|
|
+++ b/drivers/nvme/host/pci.c
|
|
@@ -1012,7 +1012,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
|
|
return;
|
|
}
|
|
|
|
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
|
|
+ req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
|
|
if (unlikely(!req)) {
|
|
dev_warn(nvmeq->dev->ctrl.device,
|
|
"invalid id %d completed on queue %d\n",
|
|
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
|
|
index c6c2e2361b2fe..9c356be7f016e 100644
|
|
--- a/drivers/nvme/host/rdma.c
|
|
+++ b/drivers/nvme/host/rdma.c
|
|
@@ -1738,10 +1738,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
|
|
struct request *rq;
|
|
struct nvme_rdma_request *req;
|
|
|
|
- rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
|
|
+ rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
|
|
if (!rq) {
|
|
dev_err(queue->ctrl->ctrl.device,
|
|
- "tag 0x%x on QP %#x not found\n",
|
|
+ "got bad command_id %#x on QP %#x\n",
|
|
cqe->command_id, queue->qp->qp_num);
|
|
nvme_rdma_error_recovery(queue->ctrl);
|
|
return;
|
|
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
|
|
index 5b11d8a23813f..c9a925999c6ea 100644
|
|
--- a/drivers/nvme/host/tcp.c
|
|
+++ b/drivers/nvme/host/tcp.c
|
|
@@ -484,11 +484,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
|
|
{
|
|
struct request *rq;
|
|
|
|
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
|
|
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
|
|
if (!rq) {
|
|
dev_err(queue->ctrl->ctrl.device,
|
|
- "queue %d tag 0x%x not found\n",
|
|
- nvme_tcp_queue_id(queue), cqe->command_id);
|
|
+ "got bad cqe.command_id %#x on queue %d\n",
|
|
+ cqe->command_id, nvme_tcp_queue_id(queue));
|
|
nvme_tcp_error_recovery(&queue->ctrl->ctrl);
|
|
return -EINVAL;
|
|
}
|
|
@@ -505,11 +505,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
|
|
{
|
|
struct request *rq;
|
|
|
|
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
|
|
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
|
|
if (!rq) {
|
|
dev_err(queue->ctrl->ctrl.device,
|
|
- "queue %d tag %#x not found\n",
|
|
- nvme_tcp_queue_id(queue), pdu->command_id);
|
|
+ "got bad c2hdata.command_id %#x on queue %d\n",
|
|
+ pdu->command_id, nvme_tcp_queue_id(queue));
|
|
return -ENOENT;
|
|
}
|
|
|
|
@@ -603,7 +603,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
|
|
data->hdr.plen =
|
|
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
data->ttag = pdu->ttag;
- data->command_id = rq->tag;
+ data->command_id = nvme_cid(rq);
data->data_offset = cpu_to_le32(req->data_sent);
data->data_length = cpu_to_le32(req->pdu_len);
return 0;
@@ -616,11 +616,11 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
struct request *rq;
int ret;
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x not found\n",
- nvme_tcp_queue_id(queue), pdu->command_id);
+ "got bad r2t.command_id %#x on queue %d\n",
+ pdu->command_id, nvme_tcp_queue_id(queue));
return -ENOENT;
}
req = blk_mq_rq_to_pdu(rq);
@@ -699,17 +699,9 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
- struct nvme_tcp_request *req;
- struct request *rq;
-
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
- if (!rq) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x not found\n",
- nvme_tcp_queue_id(queue), pdu->command_id);
- return -ENOENT;
- }
- req = blk_mq_rq_to_pdu(rq);
+ struct request *rq =
+ nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
while (true) {
int recv_len, ret;
@@ -801,8 +793,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
}
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
queue->nr_cqe++;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 16d71cc5a50eb..ff3258c3eb8b6 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
} else {
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "tag 0x%x on queue %d not found\n",
+ "got bad command_id %#x on queue %d\n",
cqe->command_id, nvme_loop_queue_idx(queue));
return;
}
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
|
|
index 955b8b8c82386..8ef772ccfb367 100644
|
|
--- a/drivers/nvmem/qfprom.c
|
|
+++ b/drivers/nvmem/qfprom.c
|
|
@@ -104,6 +104,9 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
|
|
{
|
|
int ret;
|
|
|
|
+ writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
|
|
+ writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
|
|
+
|
|
/*
|
|
* This may be a shared rail and may be able to run at a lower rate
|
|
* when we're not blowing fuses. At the moment, the regulator framework
|
|
@@ -124,9 +127,6 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
|
|
"Failed to set clock rate for disable (ignoring)\n");
|
|
|
|
clk_disable_unprepare(priv->secclk);
|
|
-
|
|
- writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
|
|
- writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
|
|
index a32e60b024b8d..6675b5e56960c 100644
|
|
--- a/drivers/of/kobj.c
|
|
+++ b/drivers/of/kobj.c
|
|
@@ -119,7 +119,7 @@ int __of_attach_node_sysfs(struct device_node *np)
|
|
struct property *pp;
|
|
int rc;
|
|
|
|
- if (!of_kset)
|
|
+ if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset)
|
|
return 0;
|
|
|
|
np->kobj.kset = of_kset;
|
|
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
|
|
index d92a1bfe16905..f83f4f6d70349 100644
|
|
--- a/drivers/opp/of.c
|
|
+++ b/drivers/opp/of.c
|
|
@@ -95,15 +95,7 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
|
|
static struct device_node *of_parse_required_opp(struct device_node *np,
|
|
int index)
|
|
{
|
|
- struct device_node *required_np;
|
|
-
|
|
- required_np = of_parse_phandle(np, "required-opps", index);
|
|
- if (unlikely(!required_np)) {
|
|
- pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
|
|
- __func__, np, index);
|
|
- }
|
|
-
|
|
- return required_np;
|
|
+ return of_parse_phandle(np, "required-opps", index);
|
|
}
|
|
|
|
/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
|
|
@@ -1193,7 +1185,7 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
|
|
|
|
required_np = of_parse_required_opp(np, index);
|
|
if (!required_np)
|
|
- return -EINVAL;
|
|
+ return -ENODEV;
|
|
|
|
opp_table = _find_table_of_opp_np(required_np);
|
|
if (IS_ERR(opp_table)) {
|
|
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
|
|
index 2c11bd3fe1fd6..17061f1df0f44 100644
|
|
--- a/drivers/parport/ieee1284_ops.c
|
|
+++ b/drivers/parport/ieee1284_ops.c
|
|
@@ -518,7 +518,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
|
|
goto out;
|
|
|
|
/* Yield the port for a while. */
|
|
- if (count && dev->port->irq != PARPORT_IRQ_NONE) {
|
|
+ if (dev->port->irq != PARPORT_IRQ_NONE) {
|
|
parport_release (dev);
|
|
schedule_timeout_interruptible(msecs_to_jiffies(40));
|
|
parport_claim_or_block (dev);
|
|
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
|
|
index b1b41b61e0bd0..88e19ad54f646 100644
|
|
--- a/drivers/pci/controller/pci-aardvark.c
|
|
+++ b/drivers/pci/controller/pci-aardvark.c
|
|
@@ -57,6 +57,7 @@
|
|
#define PIO_COMPLETION_STATUS_CRS 2
|
|
#define PIO_COMPLETION_STATUS_CA 4
|
|
#define PIO_NON_POSTED_REQ BIT(10)
|
|
+#define PIO_ERR_STATUS BIT(11)
|
|
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
|
|
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
|
|
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
|
|
@@ -117,6 +118,46 @@
|
|
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
|
|
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
|
|
|
|
+/* PCIe window configuration */
|
|
+#define OB_WIN_BASE_ADDR 0x4c00
|
|
+#define OB_WIN_BLOCK_SIZE 0x20
|
|
+#define OB_WIN_COUNT 8
|
|
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
|
|
+ OB_WIN_BLOCK_SIZE * (win) + \
|
|
+ (offset))
|
|
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
|
|
+#define OB_WIN_ENABLE BIT(0)
|
|
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
|
|
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
|
|
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
|
|
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
|
|
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
|
|
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
|
|
+#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
|
|
+#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
|
|
+#define OB_WIN_FUNC_NUM_SHIFT 24
|
|
+#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
|
|
+#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
|
|
+#define OB_WIN_BUS_NUM_BITS_SHIFT 20
|
|
+#define OB_WIN_MSG_CODE_ENABLE BIT(22)
|
|
+#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
|
|
+#define OB_WIN_MSG_CODE_SHIFT 14
|
|
+#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
|
|
+#define OB_WIN_ATTR_ENABLE BIT(11)
|
|
+#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
|
|
+#define OB_WIN_ATTR_TC_SHIFT 8
|
|
+#define OB_WIN_ATTR_RELAXED BIT(7)
|
|
+#define OB_WIN_ATTR_NOSNOOP BIT(6)
|
|
+#define OB_WIN_ATTR_POISON BIT(5)
|
|
+#define OB_WIN_ATTR_IDO BIT(4)
|
|
+#define OB_WIN_TYPE_MASK GENMASK(3, 0)
|
|
+#define OB_WIN_TYPE_SHIFT 0
|
|
+#define OB_WIN_TYPE_MEM 0x0
|
|
+#define OB_WIN_TYPE_IO 0x4
|
|
+#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
|
|
+#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
|
|
+#define OB_WIN_TYPE_MSG 0xc
|
|
+
|
|
/* LMI registers base address and register offsets */
|
|
#define LMI_BASE_ADDR 0x6000
|
|
#define CFG_REG (LMI_BASE_ADDR + 0x0)
|
|
@@ -187,8 +228,16 @@
|
|
struct advk_pcie {
|
|
struct platform_device *pdev;
|
|
void __iomem *base;
|
|
+ struct {
|
|
+ phys_addr_t match;
|
|
+ phys_addr_t remap;
|
|
+ phys_addr_t mask;
|
|
+ u32 actions;
|
|
+ } wins[OB_WIN_COUNT];
|
|
+ u8 wins_count;
|
|
struct irq_domain *irq_domain;
|
|
struct irq_chip irq_chip;
|
|
+ raw_spinlock_t irq_lock;
|
|
struct irq_domain *msi_domain;
|
|
struct irq_domain *msi_inner_domain;
|
|
struct irq_chip msi_bottom_irq_chip;
|
|
@@ -366,9 +415,39 @@ err:
|
|
dev_err(dev, "link never came up\n");
|
|
}
|
|
|
|
+/*
|
|
+ * Set PCIe address window register which could be used for memory
|
|
+ * mapping.
|
|
+ */
|
|
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
|
|
+ phys_addr_t match, phys_addr_t remap,
|
|
+ phys_addr_t mask, u32 actions)
|
|
+{
|
|
+ advk_writel(pcie, OB_WIN_ENABLE |
|
|
+ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
|
|
+ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
|
|
+ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
|
|
+ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
|
|
+ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
|
|
+ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
|
|
+ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
|
|
+}
|
|
+
|
|
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
|
|
+{
|
|
+ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
|
|
+ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
|
|
+}
|
|
+
|
|
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
|
{
|
|
u32 reg;
|
|
+ int i;
|
|
|
|
/* Enable TX */
|
|
reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
|
|
@@ -447,15 +526,51 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
|
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
|
|
advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
|
|
|
|
+ /*
|
|
+ * Enable AXI address window location generation:
|
|
+ * When it is enabled, the default outbound window
|
|
+ * configurations (Default User Field: 0xD0074CFC)
|
|
+ * are used to transparent address translation for
|
|
+ * the outbound transactions. Thus, PCIe address
|
|
+ * windows are not required for transparent memory
|
|
+ * access when default outbound window configuration
|
|
+ * is set for memory access.
|
|
+ */
|
|
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
|
|
reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
|
|
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
|
|
|
|
- /* Bypass the address window mapping for PIO */
|
|
+ /*
|
|
+ * Set memory access in Default User Field so it
|
|
+ * is not required to configure PCIe address for
|
|
+ * transparent memory access.
|
|
+ */
|
|
+ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
|
|
+
|
|
+ /*
|
|
+ * Bypass the address window mapping for PIO:
|
|
+ * Since PIO access already contains all required
|
|
+ * info over AXI interface by PIO registers, the
|
|
+ * address window is not required.
|
|
+ */
|
|
reg = advk_readl(pcie, PIO_CTRL);
|
|
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
|
|
advk_writel(pcie, reg, PIO_CTRL);
|
|
|
|
+ /*
|
|
+ * Configure PCIe address windows for non-memory or
|
|
+ * non-transparent access as by default PCIe uses
|
|
+ * transparent memory access.
|
|
+ */
|
|
+ for (i = 0; i < pcie->wins_count; i++)
|
|
+ advk_pcie_set_ob_win(pcie, i,
|
|
+ pcie->wins[i].match, pcie->wins[i].remap,
|
|
+ pcie->wins[i].mask, pcie->wins[i].actions);
|
|
+
|
|
+ /* Disable remaining PCIe outbound windows */
|
|
+ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
|
|
+ advk_pcie_disable_ob_win(pcie, i);
|
|
+
|
|
advk_pcie_train_link(pcie);
|
|
|
|
/*
|
|
@@ -472,7 +587,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
|
advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
|
|
}
|
|
|
|
-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
|
|
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
|
|
{
|
|
struct device *dev = &pcie->pdev->dev;
|
|
u32 reg;
|
|
@@ -483,14 +598,49 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
|
|
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
|
|
PIO_COMPLETION_STATUS_SHIFT;
|
|
|
|
- if (!status)
|
|
- return;
|
|
-
|
|
+ /*
|
|
+ * According to HW spec, the PIO status check sequence as below:
|
|
+ * 1) even if COMPLETION_STATUS(bit9:7) indicates successful,
|
|
+ * it still needs to check Error Status(bit11), only when this bit
|
|
+ * indicates no error happen, the operation is successful.
|
|
+ * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
|
|
+ * means a PIO write error, and for PIO read it is successful with
|
|
+ * a read value of 0xFFFFFFFF.
|
|
+ * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
|
|
+ * only means a PIO write error, and for PIO read it is successful
|
|
+ * with a read value of 0xFFFF0001.
|
|
+ * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
|
|
+ * error for both PIO read and PIO write operation.
|
|
+ * 5) other errors are indicated as 'unknown'.
|
|
+ */
|
|
switch (status) {
|
|
+ case PIO_COMPLETION_STATUS_OK:
|
|
+ if (reg & PIO_ERR_STATUS) {
|
|
+ strcomp_status = "COMP_ERR";
|
|
+ break;
|
|
+ }
|
|
+ /* Get the read result */
|
|
+ if (val)
|
|
+ *val = advk_readl(pcie, PIO_RD_DATA);
|
|
+ /* No error */
|
|
+ strcomp_status = NULL;
|
|
+ break;
|
|
case PIO_COMPLETION_STATUS_UR:
|
|
strcomp_status = "UR";
|
|
break;
|
|
case PIO_COMPLETION_STATUS_CRS:
|
|
+ /* PCIe r4.0, sec 2.3.2, says:
|
|
+ * If CRS Software Visibility is not enabled, the Root Complex
|
|
+ * must re-issue the Configuration Request as a new Request.
|
|
+ * A Root Complex implementation may choose to limit the number
|
|
+ * of Configuration Request/CRS Completion Status loops before
|
|
+ * determining that something is wrong with the target of the
|
|
+ * Request and taking appropriate action, e.g., complete the
|
|
+ * Request to the host as a failed transaction.
|
|
+ *
|
|
+ * To simplify implementation do not re-issue the Configuration
|
|
+ * Request and complete the Request as a failed transaction.
|
|
+ */
|
|
strcomp_status = "CRS";
|
|
break;
|
|
case PIO_COMPLETION_STATUS_CA:
|
|
@@ -501,6 +651,9 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
|
|
break;
|
|
}
|
|
|
|
+ if (!strcomp_status)
|
|
+ return 0;
|
|
+
|
|
if (reg & PIO_NON_POSTED_REQ)
|
|
str_posted = "Non-posted";
|
|
else
|
|
@@ -508,6 +661,8 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
|
|
|
|
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
|
|
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
|
|
+
|
|
+ return -EFAULT;
|
|
}
|
|
|
|
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
|
|
@@ -745,10 +900,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
|
return PCIBIOS_SET_FAILED;
|
|
}
|
|
|
|
- advk_pcie_check_pio_status(pcie);
|
|
+ /* Check PIO status and get the read result */
|
|
+ ret = advk_pcie_check_pio_status(pcie, val);
|
|
+ if (ret < 0) {
|
|
+ *val = 0xffffffff;
|
|
+ return PCIBIOS_SET_FAILED;
|
|
+ }
|
|
|
|
- /* Get the read result */
|
|
- *val = advk_readl(pcie, PIO_RD_DATA);
|
|
if (size == 1)
|
|
*val = (*val >> (8 * (where & 3))) & 0xff;
|
|
else if (size == 2)
|
|
@@ -812,7 +970,9 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
|
|
if (ret < 0)
|
|
return PCIBIOS_SET_FAILED;
|
|
|
|
- advk_pcie_check_pio_status(pcie);
|
|
+ ret = advk_pcie_check_pio_status(pcie, NULL);
|
|
+ if (ret < 0)
|
|
+ return PCIBIOS_SET_FAILED;
|
|
|
|
return PCIBIOS_SUCCESSFUL;
|
|
}
|
|
@@ -886,22 +1046,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
|
|
{
|
|
struct advk_pcie *pcie = d->domain->host_data;
|
|
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
|
+ unsigned long flags;
|
|
u32 mask;
|
|
|
|
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
|
|
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
|
|
mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
|
|
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
|
|
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
|
|
}
|
|
|
|
static void advk_pcie_irq_unmask(struct irq_data *d)
|
|
{
|
|
struct advk_pcie *pcie = d->domain->host_data;
|
|
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
|
+ unsigned long flags;
|
|
u32 mask;
|
|
|
|
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
|
|
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
|
|
mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
|
|
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
|
|
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
|
|
}
|
|
|
|
static int advk_pcie_irq_map(struct irq_domain *h,
|
|
@@ -985,6 +1151,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
|
|
struct irq_chip *irq_chip;
|
|
int ret = 0;
|
|
|
|
+ raw_spin_lock_init(&pcie->irq_lock);
|
|
+
|
|
pcie_intc_node = of_get_next_child(node, NULL);
|
|
if (!pcie_intc_node) {
|
|
dev_err(dev, "No PCIe Intc node found\n");
|
|
@@ -1162,6 +1330,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
|
|
struct device *dev = &pdev->dev;
|
|
struct advk_pcie *pcie;
|
|
struct pci_host_bridge *bridge;
|
|
+ struct resource_entry *entry;
|
|
int ret, irq;
|
|
|
|
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
|
|
@@ -1172,6 +1341,80 @@ static int advk_pcie_probe(struct platform_device *pdev)
|
|
pcie->pdev = pdev;
|
|
platform_set_drvdata(pdev, pcie);
|
|
|
|
+ resource_list_for_each_entry(entry, &bridge->windows) {
|
|
+ resource_size_t start = entry->res->start;
|
|
+ resource_size_t size = resource_size(entry->res);
|
|
+ unsigned long type = resource_type(entry->res);
|
|
+ u64 win_size;
|
|
+
|
|
+ /*
|
|
+ * Aardvark hardware allows to configure also PCIe window
|
|
+ * for config type 0 and type 1 mapping, but driver uses
|
|
+ * only PIO for issuing configuration transfers which does
|
|
+ * not use PCIe window configuration.
|
|
+ */
|
|
+ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
|
|
+ type != IORESOURCE_IO)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * Skip transparent memory resources. Default outbound access
|
|
+ * configuration is set to transparent memory access so it
|
|
+ * does not need window configuration.
|
|
+ */
|
|
+ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
|
|
+ entry->offset == 0)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * The n-th PCIe window is configured by tuple (match, remap, mask)
|
|
+ * and an access to address A uses this window if A matches the
|
|
+ * match with given mask.
|
|
+ * So every PCIe window size must be a power of two and every start
|
|
+ * address must be aligned to window size. Minimal size is 64 KiB
|
|
+ * because lower 16 bits of mask must be zero. Remapped address
|
|
+ * may have set only bits from the mask.
|
|
+ */
|
|
+ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
|
|
+ /* Calculate the largest aligned window size */
|
|
+ win_size = (1ULL << (fls64(size)-1)) |
|
|
+ (start ? (1ULL << __ffs64(start)) : 0);
|
|
+ win_size = 1ULL << __ffs64(win_size);
|
|
+ if (win_size < 0x10000)
|
|
+ break;
|
|
+
|
|
+ dev_dbg(dev,
|
|
+ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
|
|
+ pcie->wins_count, (unsigned long long)start,
|
|
+ (unsigned long long)start + win_size, type);
|
|
+
|
|
+ if (type == IORESOURCE_IO) {
|
|
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
|
|
+ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
|
|
+ } else {
|
|
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
|
|
+ pcie->wins[pcie->wins_count].match = start;
|
|
+ }
|
|
+ pcie->wins[pcie->wins_count].remap = start - entry->offset;
|
|
+ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
|
|
+
|
|
+ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
|
|
+ break;
|
|
+
|
|
+ start += win_size;
|
|
+ size -= win_size;
|
|
+ pcie->wins_count++;
|
|
+ }
|
|
+
|
|
+ if (size > 0) {
|
|
+ dev_err(&pcie->pdev->dev,
|
|
+ "Invalid PCIe region [0x%llx-0x%llx]\n",
|
|
+ (unsigned long long)entry->res->start,
|
|
+ (unsigned long long)entry->res->end + 1);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ }
|
|
+
|
|
pcie->base = devm_platform_ioremap_resource(pdev, 0);
|
|
if (IS_ERR(pcie->base))
|
|
return PTR_ERR(pcie->base);
|
|
@@ -1252,6 +1495,7 @@ static int advk_pcie_remove(struct platform_device *pdev)
|
|
{
|
|
struct advk_pcie *pcie = platform_get_drvdata(pdev);
|
|
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
|
|
+ int i;
|
|
|
|
pci_lock_rescan_remove();
|
|
pci_stop_root_bus(bridge->bus);
|
|
@@ -1261,6 +1505,10 @@ static int advk_pcie_remove(struct platform_device *pdev)
|
|
advk_pcie_remove_msi_irq_domain(pcie);
|
|
advk_pcie_remove_irq_domain(pcie);
|
|
|
|
+ /* Disable outbound address windows mapping */
|
|
+ for (i = 0; i < OB_WIN_COUNT; i++)
|
|
+ advk_pcie_disable_ob_win(pcie, i);
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
|
|
index f3cf7d61924f1..2a9fe7c3aef9f 100644
|
|
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
|
|
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
|
|
@@ -6,6 +6,7 @@
|
|
* (C) Copyright 2014 - 2015, Xilinx, Inc.
|
|
*/
|
|
|
|
+#include <linux/clk.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/irq.h>
|
|
@@ -168,6 +169,7 @@ struct nwl_pcie {
|
|
u8 last_busno;
|
|
struct nwl_msi msi;
|
|
struct irq_domain *legacy_irq_domain;
|
|
+ struct clk *clk;
|
|
raw_spinlock_t leg_mask_lock;
|
|
};
|
|
|
|
@@ -825,6 +827,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
|
|
return err;
|
|
}
|
|
|
|
+ pcie->clk = devm_clk_get(dev, NULL);
|
|
+ if (IS_ERR(pcie->clk))
|
|
+ return PTR_ERR(pcie->clk);
|
|
+
|
|
+ err = clk_prepare_enable(pcie->clk);
|
|
+ if (err) {
|
|
+ dev_err(dev, "can't enable PCIe ref clock\n");
|
|
+ return err;
|
|
+ }
|
|
+
|
|
err = nwl_pcie_bridge_init(pcie);
|
|
if (err) {
|
|
dev_err(dev, "HW Initialization failed\n");
|
|
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
|
|
index 2548c64194ca9..a7a1c74113483 100644
|
|
--- a/drivers/pci/msi.c
|
|
+++ b/drivers/pci/msi.c
|
|
@@ -783,6 +783,9 @@ static void msix_mask_all(void __iomem *base, int tsize)
|
|
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
|
|
int i;
|
|
|
|
+ if (pci_msi_ignore_mask)
|
|
+ return;
|
|
+
|
|
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
|
|
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
|
|
}
|
|
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
|
|
index 29f5d699fa06d..eae6a9fdd33d4 100644
|
|
--- a/drivers/pci/pci.c
|
|
+++ b/drivers/pci/pci.c
|
|
@@ -1880,11 +1880,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
|
|
* so that things like MSI message writing will behave as expected
|
|
* (e.g. if the device really is in D0 at enable time).
|
|
*/
|
|
- if (dev->pm_cap) {
|
|
- u16 pmcsr;
|
|
- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
|
|
- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
|
|
- }
|
|
+ pci_update_current_state(dev, dev->current_state);
|
|
|
|
if (atomic_inc_return(&dev->enable_cnt) > 1)
|
|
return 0; /* already enabled */
|
|
@@ -4043,6 +4039,7 @@ phys_addr_t pci_pio_to_address(unsigned long pio)
|
|
|
|
return address;
|
|
}
|
|
+EXPORT_SYMBOL_GPL(pci_pio_to_address);
|
|
|
|
unsigned long __weak pci_address_to_pio(phys_addr_t address)
|
|
{
|
|
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
|
|
index 50a9522ab07df..3779b264dbec3 100644
|
|
--- a/drivers/pci/pcie/portdrv_core.c
|
|
+++ b/drivers/pci/pcie/portdrv_core.c
|
|
@@ -260,8 +260,13 @@ static int get_port_device_capability(struct pci_dev *dev)
|
|
services |= PCIE_PORT_SERVICE_DPC;
|
|
|
|
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
|
|
- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
|
|
- services |= PCIE_PORT_SERVICE_BWNOTIF;
|
|
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
|
|
+ u32 linkcap;
|
|
+
|
|
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
|
|
+ if (linkcap & PCI_EXP_LNKCAP_LBNC)
|
|
+ services |= PCIE_PORT_SERVICE_BWNOTIF;
|
|
+ }
|
|
|
|
return services;
|
|
}
|
|
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
|
|
index a91c944961caa..bad294c352519 100644
|
|
--- a/drivers/pci/quirks.c
|
|
+++ b/drivers/pci/quirks.c
|
|
@@ -3252,6 +3252,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
|
|
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
|
|
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
|
|
PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
|
|
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
|
|
|
|
/*
|
|
* Intel 5000 and 5100 Memory controllers have an erratum with read completion
|
|
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
|
|
index 8b003c890b87b..c9f03418e71e0 100644
|
|
--- a/drivers/pci/syscall.c
|
|
+++ b/drivers/pci/syscall.c
|
|
@@ -22,8 +22,10 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
|
|
long err;
|
|
int cfg_ret;
|
|
|
|
+ err = -EPERM;
|
|
+ dev = NULL;
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
- return -EPERM;
|
|
+ goto error;
|
|
|
|
err = -ENODEV;
|
|
dev = pci_get_domain_bus_and_slot(0, bus, dfn);
|
|
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
|
|
index 903a4baf3846c..c8b3e396ea275 100644
|
|
--- a/drivers/pinctrl/actions/pinctrl-owl.c
|
|
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
|
|
@@ -444,7 +444,6 @@ static int owl_group_config_get(struct pinctrl_dev *pctrldev,
|
|
*config = pinconf_to_config_packed(param, arg);
|
|
|
|
return ret;
|
|
-
|
|
}
|
|
|
|
static int owl_group_config_set(struct pinctrl_dev *pctrldev,
|
|
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
|
|
index 20b477cd5a30a..6e6825d17a1d1 100644
|
|
--- a/drivers/pinctrl/core.c
|
|
+++ b/drivers/pinctrl/core.c
|
|
@@ -2119,7 +2119,6 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
|
|
return ERR_PTR(error);
|
|
|
|
return pctldev;
|
|
-
|
|
}
|
|
EXPORT_SYMBOL_GPL(pinctrl_register);
|
|
|
|
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
|
|
index 08d110078c439..70186448d2f4a 100644
|
|
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
|
|
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
|
|
@@ -290,7 +290,6 @@ static const struct pinctrl_ops imx1_pctrl_ops = {
|
|
.pin_dbg_show = imx1_pin_dbg_show,
|
|
.dt_node_to_map = imx1_dt_node_to_map,
|
|
.dt_free_map = imx1_dt_free_map,
|
|
-
|
|
};
|
|
|
|
static int imx1_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
|
|
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
|
|
index 68894e9e05d2e..5cb018f988003 100644
|
|
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
|
|
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
|
|
@@ -167,10 +167,14 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
|
|
PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
|
|
PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
|
|
PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
|
|
- PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
|
|
- PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
|
|
- PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
|
|
- PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
|
|
+ PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
|
|
+ "pwm", "led"),
|
|
+ PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
|
|
+ "pwm", "led"),
|
|
+ PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
|
|
+ "pwm", "led"),
|
|
+ PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
|
|
+ "pwm", "led"),
|
|
PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
|
|
PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
|
|
PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
|
|
@@ -184,11 +188,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
|
|
PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
|
|
BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
|
|
18, 2, "gpio", "uart"),
|
|
- PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
|
|
- PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
|
|
- PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
|
|
- PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
|
|
-
|
|
};
|
|
|
|
static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
|
|
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
|
|
index 72edc675431ce..9015486e38c18 100644
|
|
--- a/drivers/pinctrl/pinctrl-at91.c
|
|
+++ b/drivers/pinctrl/pinctrl-at91.c
|
|
@@ -733,7 +733,6 @@ static const struct at91_pinctrl_mux_ops sam9x60_ops = {
|
|
.get_slewrate = at91_mux_sam9x60_get_slewrate,
|
|
.set_slewrate = at91_mux_sam9x60_set_slewrate,
|
|
.irq_type = alt_gpio_irq_type,
|
|
-
|
|
};
|
|
|
|
static struct at91_pinctrl_mux_ops sama5d3_ops = {
|
|
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
|
|
index 033d142f0c272..e0df5ad6741dc 100644
|
|
--- a/drivers/pinctrl/pinctrl-ingenic.c
|
|
+++ b/drivers/pinctrl/pinctrl-ingenic.c
|
|
@@ -363,7 +363,7 @@ static const struct ingenic_chip_info jz4725b_chip_info = {
|
|
};
|
|
|
|
static const u32 jz4760_pull_ups[6] = {
|
|
- 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0xfffff00f,
|
|
+ 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0x0000000f,
|
|
};
|
|
|
|
static const u32 jz4760_pull_downs[6] = {
|
|
@@ -618,11 +618,11 @@ static const struct ingenic_chip_info jz4760_chip_info = {
|
|
};
|
|
|
|
static const u32 jz4770_pull_ups[6] = {
|
|
- 0x3fffffff, 0xfff0030c, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0xffa7f00f,
|
|
+ 0x3fffffff, 0xfff0f3fc, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0x0024f00f,
|
|
};
|
|
|
|
static const u32 jz4770_pull_downs[6] = {
|
|
- 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x00580ff0,
|
|
+ 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x005b0ff0,
|
|
};
|
|
|
|
static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, };
|
|
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
|
|
index 12cc4eb186377..17aa0d542d925 100644
|
|
--- a/drivers/pinctrl/pinctrl-single.c
|
|
+++ b/drivers/pinctrl/pinctrl-single.c
|
|
@@ -1222,6 +1222,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
|
|
|
|
if (PCS_HAS_PINCONF) {
|
|
dev_err(pcs->dev, "pinconf not supported\n");
|
|
+ res = -ENOTSUPP;
|
|
goto free_pingroups;
|
|
}
|
|
|
|
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
|
|
index 7b8c7a0b13de0..43d9e6c7fd81f 100644
|
|
--- a/drivers/pinctrl/pinctrl-st.c
|
|
+++ b/drivers/pinctrl/pinctrl-st.c
|
|
@@ -541,7 +541,6 @@ static void st_pinconf_set_retime_packed(struct st_pinctrl *info,
|
|
st_regmap_field_bit_set_clear_pin(rt_p->delay_0, delay & 0x1, pin);
|
|
/* 2 bit delay, msb */
|
|
st_regmap_field_bit_set_clear_pin(rt_p->delay_1, delay & 0x2, pin);
|
|
-
|
|
}
|
|
|
|
static void st_pinconf_set_retime_dedicated(struct st_pinctrl *info,
|
|
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
|
|
index 008c83107a3ca..5fa2488fae87a 100644
|
|
--- a/drivers/pinctrl/pinctrl-stmfx.c
|
|
+++ b/drivers/pinctrl/pinctrl-stmfx.c
|
|
@@ -566,7 +566,7 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
|
|
u8 pending[NR_GPIO_REGS];
|
|
u8 src[NR_GPIO_REGS] = {0, 0, 0};
|
|
unsigned long n, status;
|
|
- int ret;
|
|
+ int i, ret;
|
|
|
|
ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
|
|
&pending, NR_GPIO_REGS);
|
|
@@ -576,7 +576,9 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
|
|
regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
|
|
src, NR_GPIO_REGS);
|
|
|
|
- status = *(unsigned long *)pending;
|
|
+ BUILD_BUG_ON(NR_GPIO_REGS > sizeof(status));
|
|
+ for (i = 0, status = 0; i < NR_GPIO_REGS; i++)
|
|
+ status |= (unsigned long)pending[i] << (i * 8);
|
|
for_each_set_bit(n, &status, gc->ngpio) {
|
|
handle_nested_irq(irq_find_mapping(gc->irq.domain, n));
|
|
stmfx_pinctrl_irq_toggle_trigger(pctl, n);
|
|
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
|
|
index c110f780407bd..484a3b9e875c1 100644
|
|
--- a/drivers/pinctrl/pinctrl-sx150x.c
|
|
+++ b/drivers/pinctrl/pinctrl-sx150x.c
|
|
@@ -443,7 +443,6 @@ static void sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset,
|
|
sx150x_gpio_oscio_set(pctl, value);
|
|
else
|
|
__sx150x_gpio_set(pctl, offset, value);
|
|
-
|
|
}
|
|
|
|
static void sx150x_gpio_set_multiple(struct gpio_chip *chip,
|
|
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
|
|
index 2834d2c1338c8..c51793f6546f1 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
|
|
@@ -1310,7 +1310,6 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
|
|
.ngpios = 151,
|
|
.wakeirq_map = sdm845_pdc_map,
|
|
.nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
|
|
-
|
|
};
|
|
|
|
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
|
|
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
|
|
index 681d8dcf37e34..92e7f2602847c 100644
|
|
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
|
|
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
|
|
@@ -617,7 +617,6 @@ static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
|
|
}
|
|
break;
|
|
}
|
|
-
|
|
}
|
|
|
|
static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
|
|
diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
|
|
index 04812e62f3a47..9d89da2319e56 100644
|
|
--- a/drivers/pinctrl/renesas/pfc-r8a77950.c
|
|
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
|
|
@@ -1668,7 +1668,6 @@ static const unsigned int avb_mii_pins[] = {
|
|
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
|
|
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
|
|
PIN_AVB_TXCREFCLK,
|
|
-
|
|
};
|
|
static const unsigned int avb_mii_mux[] = {
|
|
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
|
|
diff --git a/drivers/pinctrl/renesas/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c
|
|
index a94ebe0bf5d06..4aea6e4b71571 100644
|
|
--- a/drivers/pinctrl/renesas/pfc-r8a77951.c
|
|
+++ b/drivers/pinctrl/renesas/pfc-r8a77951.c
|
|
@@ -1727,7 +1727,6 @@ static const unsigned int avb_mii_pins[] = {
|
|
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
|
|
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
|
|
PIN_AVB_TXCREFCLK,
|
|
-
|
|
};
|
|
static const unsigned int avb_mii_mux[] = {
|
|
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
|
|
diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
|
|
index 3878d6b0db149..a67fa0e4df7c7 100644
|
|
--- a/drivers/pinctrl/renesas/pfc-r8a7796.c
|
|
+++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
|
|
@@ -1732,7 +1732,6 @@ static const unsigned int avb_mii_pins[] = {
|
|
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
|
|
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
|
|
PIN_AVB_TXCREFCLK,
|
|
-
|
|
};
|
|
static const unsigned int avb_mii_mux[] = {
|
|
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
|
|
diff --git a/drivers/pinctrl/renesas/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c
|
|
index 7a50b9b69a7dc..7db2b7f2ff678 100644
|
|
--- a/drivers/pinctrl/renesas/pfc-r8a77965.c
|
|
+++ b/drivers/pinctrl/renesas/pfc-r8a77965.c
|
|
@@ -1736,7 +1736,6 @@ static const unsigned int avb_mii_pins[] = {
|
|
PIN_AVB_RX_CTL, PIN_AVB_RXC, PIN_AVB_RD0,
|
|
PIN_AVB_RD1, PIN_AVB_RD2, PIN_AVB_RD3,
|
|
PIN_AVB_TXCREFCLK,
|
|
-
|
|
};
|
|
static const unsigned int avb_mii_mux[] = {
|
|
AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
|
|
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
|
|
index 608eb5a07248e..7f809a57bee50 100644
|
|
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
|
|
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
|
|
@@ -918,7 +918,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
|
|
pin_bank->grange.pin_base = drvdata->pin_base
|
|
+ pin_bank->pin_base;
|
|
pin_bank->grange.base = pin_bank->grange.pin_base;
|
|
- pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
|
|
+ pin_bank->grange.npins = pin_bank->nr_pins;
|
|
pin_bank->grange.gc = &pin_bank->gpio_chip;
|
|
pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
|
|
}
|
|
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
|
|
index ea5149efcbeae..9f698a7aad129 100644
|
|
--- a/drivers/platform/chrome/cros_ec_proto.c
|
|
+++ b/drivers/platform/chrome/cros_ec_proto.c
|
|
@@ -279,6 +279,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
|
|
msg->insize = sizeof(struct ec_response_get_protocol_info);
|
|
|
|
ret = send_command(ec_dev, msg);
|
|
+ /*
|
|
+ * Send command once again when timeout occurred.
|
|
+ * Fingerprint MCU (FPMCU) is restarted during system boot which
|
|
+ * introduces small window in which FPMCU won't respond for any
|
|
+ * messages sent by kernel. There is no need to wait before next
|
|
+ * attempt because we waited at least EC_MSG_DEADLINE_MS.
|
|
+ */
|
|
+ if (ret == -ETIMEDOUT)
|
|
+ ret = send_command(ec_dev, msg);
|
|
|
|
if (ret < 0) {
|
|
dev_dbg(ec_dev->dev,
|
|
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
|
|
index c97bd4a452422..5821e9d9a4ce4 100644
|
|
--- a/drivers/platform/x86/dell-smbios-wmi.c
|
|
+++ b/drivers/platform/x86/dell-smbios-wmi.c
|
|
@@ -69,6 +69,7 @@ static int run_smbios_call(struct wmi_device *wdev)
|
|
if (obj->type == ACPI_TYPE_INTEGER)
|
|
dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
|
|
obj->integer.value);
|
|
+ kfree(output.pointer);
|
|
return -EIO;
|
|
}
|
|
memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
|
|
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
|
|
index 48d3985eaa8ad..69bb0f56e492a 100644
|
|
--- a/drivers/power/supply/max17042_battery.c
|
|
+++ b/drivers/power/supply/max17042_battery.c
|
|
@@ -859,8 +859,12 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
|
|
{
|
|
struct max17042_chip *chip = dev;
|
|
u32 val;
|
|
+ int ret;
|
|
+
|
|
+ ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
|
|
+ if (ret)
|
|
+ return IRQ_HANDLED;
|
|
|
|
- regmap_read(chip->regmap, MAX17042_STATUS, &val);
|
|
if ((val & STATUS_INTR_SOCMIN_BIT) ||
|
|
(val & STATUS_INTR_SOCMAX_BIT)) {
|
|
dev_info(&chip->client->dev, "SOC threshold INTR\n");
|
|
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
|
|
index e3840386f430c..6eec86b0b1751 100644
|
|
--- a/drivers/rtc/rtc-tps65910.c
|
|
+++ b/drivers/rtc/rtc-tps65910.c
|
|
@@ -469,6 +469,6 @@ static struct platform_driver tps65910_rtc_driver = {
|
|
};
|
|
|
|
module_platform_driver(tps65910_rtc_driver);
|
|
-MODULE_ALIAS("platform:rtc-tps65910");
|
|
+MODULE_ALIAS("platform:tps65910-rtc");
|
|
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
|
|
MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
|
|
index f9a31c7819ae6..3e29c26f01856 100644
|
|
--- a/drivers/s390/cio/qdio_main.c
|
|
+++ b/drivers/s390/cio/qdio_main.c
|
|
@@ -1025,6 +1025,33 @@ static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
|
|
}
|
|
}
|
|
|
|
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
|
|
+{
|
|
+ struct ccw_device *cdev = irq->cdev;
|
|
+ int rc;
|
|
+
|
|
+ spin_lock_irq(get_ccwdev_lock(cdev));
|
|
+ qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
|
|
+ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
|
|
+ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
|
|
+ else
|
|
+ /* default behaviour is halt */
|
|
+ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
|
|
+ spin_unlock_irq(get_ccwdev_lock(cdev));
|
|
+ if (rc) {
|
|
+ DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
|
|
+ DBF_ERROR("rc:%4d", rc);
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ wait_event_interruptible_timeout(cdev->private->wait_q,
|
|
+ irq->state == QDIO_IRQ_STATE_INACTIVE ||
|
|
+ irq->state == QDIO_IRQ_STATE_ERR,
|
|
+ 10 * HZ);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
/**
|
|
* qdio_shutdown - shut down a qdio subchannel
|
|
* @cdev: associated ccw device
|
|
@@ -1063,27 +1090,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
|
|
qdio_shutdown_queues(irq_ptr);
|
|
qdio_shutdown_debug_entries(irq_ptr);
|
|
|
|
- /* cleanup subchannel */
|
|
- spin_lock_irq(get_ccwdev_lock(cdev));
|
|
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
|
|
- if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
|
|
- rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
|
|
- else
|
|
- /* default behaviour is halt */
|
|
- rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
|
|
- spin_unlock_irq(get_ccwdev_lock(cdev));
|
|
- if (rc) {
|
|
- DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
|
|
- DBF_ERROR("rc:%4d", rc);
|
|
- goto no_cleanup;
|
|
- }
|
|
-
|
|
- wait_event_interruptible_timeout(cdev->private->wait_q,
|
|
- irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
|
|
- irq_ptr->state == QDIO_IRQ_STATE_ERR,
|
|
- 10 * HZ);
|
|
-
|
|
-no_cleanup:
|
|
+ rc = qdio_cancel_ccw(irq_ptr, how);
|
|
qdio_shutdown_thinint(irq_ptr);
|
|
qdio_shutdown_irq(irq_ptr);
|
|
|
|
@@ -1243,6 +1250,7 @@ int qdio_establish(struct ccw_device *cdev,
|
|
{
|
|
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
|
|
struct subchannel_id schid;
|
|
+ long timeout;
|
|
int rc;
|
|
|
|
ccw_device_get_schid(cdev, &schid);
|
|
@@ -1268,11 +1276,8 @@ int qdio_establish(struct ccw_device *cdev,
|
|
qdio_setup_irq(irq_ptr, init_data);
|
|
|
|
rc = qdio_establish_thinint(irq_ptr);
|
|
- if (rc) {
|
|
- qdio_shutdown_irq(irq_ptr);
|
|
- mutex_unlock(&irq_ptr->setup_mutex);
|
|
- return rc;
|
|
- }
|
|
+ if (rc)
|
|
+ goto err_thinint;
|
|
|
|
/* establish q */
|
|
irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
|
|
@@ -1288,15 +1293,16 @@ int qdio_establish(struct ccw_device *cdev,
|
|
if (rc) {
|
|
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
|
|
DBF_ERROR("rc:%4x", rc);
|
|
- qdio_shutdown_thinint(irq_ptr);
|
|
- qdio_shutdown_irq(irq_ptr);
|
|
- mutex_unlock(&irq_ptr->setup_mutex);
|
|
- return rc;
|
|
+ goto err_ccw_start;
|
|
}
|
|
|
|
- wait_event_interruptible_timeout(cdev->private->wait_q,
|
|
- irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
|
|
- irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
|
|
+ timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
|
|
+ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
|
|
+ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
|
|
+ if (timeout <= 0) {
|
|
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
|
|
+ goto err_ccw_timeout;
|
|
+ }
|
|
|
|
if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
|
|
mutex_unlock(&irq_ptr->setup_mutex);
|
|
@@ -1315,6 +1321,16 @@ int qdio_establish(struct ccw_device *cdev,
|
|
qdio_print_subchannel_info(irq_ptr);
|
|
qdio_setup_debug_entries(irq_ptr);
|
|
return 0;
|
|
+
|
|
+err_ccw_timeout:
|
|
+ qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
|
|
+err_ccw_start:
|
|
+ qdio_shutdown_thinint(irq_ptr);
|
|
+err_thinint:
|
|
+ qdio_shutdown_irq(irq_ptr);
|
|
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
|
|
+ mutex_unlock(&irq_ptr->setup_mutex);
|
|
+ return rc;
|
|
}
|
|
EXPORT_SYMBOL_GPL(qdio_establish);
|
|
|
|
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
|
|
index 7231de2767a96..39ef074069971 100644
|
|
--- a/drivers/scsi/BusLogic.c
|
|
+++ b/drivers/scsi/BusLogic.c
|
|
@@ -1845,7 +1845,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
|
|
else
|
|
blogic_info("None, ", adapter);
|
|
if (adapter->bios_addr > 0)
|
|
- blogic_info("BIOS Address: 0x%lX, ", adapter,
|
|
+ blogic_info("BIOS Address: 0x%X, ", adapter,
|
|
adapter->bios_addr);
|
|
else
|
|
blogic_info("BIOS Address: None, ", adapter);
|
|
@@ -3603,7 +3603,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
|
|
if (buf[0] != '\n' || len > 1)
|
|
printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
|
|
} else
|
|
- printk("%s", buf);
|
|
+ pr_cont("%s", buf);
|
|
} else {
|
|
if (begin) {
|
|
if (adapter != NULL && adapter->adapter_initd)
|
|
@@ -3611,7 +3611,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
|
|
else
|
|
printk("%s%s", blogic_msglevelmap[msglevel], buf);
|
|
} else
|
|
- printk("%s", buf);
|
|
+ pr_cont("%s", buf);
|
|
}
|
|
begin = (buf[len - 1] == '\n');
|
|
}
|
|
diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
|
|
index e42acf314d068..33df6a9ba9b5f 100644
|
|
--- a/drivers/scsi/pcmcia/fdomain_cs.c
|
|
+++ b/drivers/scsi/pcmcia/fdomain_cs.c
|
|
@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
|
|
goto fail_disable;
|
|
|
|
if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
|
|
- "fdomain_cs"))
|
|
+ "fdomain_cs")) {
|
|
+ ret = -EBUSY;
|
|
goto fail_disable;
|
|
+ }
|
|
|
|
sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
|
|
if (!sh) {
|
|
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
|
|
index 846a02de4d510..c63dcc39f76c2 100644
|
|
--- a/drivers/scsi/qedf/qedf_main.c
|
|
+++ b/drivers/scsi/qedf/qedf_main.c
|
|
@@ -3000,7 +3000,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
|
|
{
|
|
u32 *list;
|
|
int i;
|
|
- int status = 0, rc;
|
|
+ int status;
|
|
u32 *pbl;
|
|
dma_addr_t page;
|
|
int num_pages;
|
|
@@ -3012,7 +3012,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
|
|
*/
|
|
if (!qedf->num_queues) {
|
|
QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
|
|
- return 1;
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
/*
|
|
@@ -3020,7 +3020,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
|
|
* addresses of our queues
|
|
*/
|
|
if (!qedf->p_cpuq) {
|
|
- status = 1;
|
|
+ status = -EINVAL;
|
|
QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
|
|
goto mem_alloc_failure;
|
|
}
|
|
@@ -3036,8 +3036,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
|
|
"qedf->global_queues=%p.\n", qedf->global_queues);
|
|
|
|
/* Allocate DMA coherent buffers for BDQ */
|
|
- rc = qedf_alloc_bdq(qedf);
|
|
- if (rc) {
|
|
+ status = qedf_alloc_bdq(qedf);
|
|
+ if (status) {
|
|
QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
|
|
goto mem_alloc_failure;
|
|
}
|
|
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
|
|
index b33eff9ea80ba..299d0369e4f08 100644
|
|
--- a/drivers/scsi/qedi/qedi_main.c
|
|
+++ b/drivers/scsi/qedi/qedi_main.c
|
|
@@ -1623,7 +1623,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
|
|
{
|
|
u32 *list;
|
|
int i;
|
|
- int status = 0, rc;
|
|
+ int status;
|
|
u32 *pbl;
|
|
dma_addr_t page;
|
|
int num_pages;
|
|
@@ -1634,14 +1634,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
|
|
*/
|
|
if (!qedi->num_queues) {
|
|
QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
|
|
- return 1;
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
/* Make sure we allocated the PBL that will contain the physical
|
|
* addresses of our queues
|
|
*/
|
|
if (!qedi->p_cpuq) {
|
|
- status = 1;
|
|
+ status = -EINVAL;
|
|
goto mem_alloc_failure;
|
|
}
|
|
|
|
@@ -1656,13 +1656,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
|
|
"qedi->global_queues=%p.\n", qedi->global_queues);
|
|
|
|
/* Allocate DMA coherent buffers for BDQ */
|
|
- rc = qedi_alloc_bdq(qedi);
|
|
- if (rc)
|
|
+ status = qedi_alloc_bdq(qedi);
|
|
+ if (status)
|
|
goto mem_alloc_failure;
|
|
|
|
/* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
|
|
- rc = qedi_alloc_nvm_iscsi_cfg(qedi);
|
|
- if (rc)
|
|
+ status = qedi_alloc_nvm_iscsi_cfg(qedi);
|
|
+ if (status)
|
|
goto mem_alloc_failure;
|
|
|
|
/* Allocate a CQ and an associated PBL for each MSI-X
|
|
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
|
|
index b7a1dc24db380..f6c76a063294b 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_nvme.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
|
|
@@ -91,8 +91,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
|
|
struct qla_hw_data *ha;
|
|
struct qla_qpair *qpair;
|
|
|
|
- if (!qidx)
|
|
- qidx++;
|
|
+ /* Map admin queue and 1st IO queue to index 0 */
|
|
+ if (qidx)
|
|
+ qidx--;
|
|
|
|
vha = (struct scsi_qla_host *)lport->private;
|
|
ha = vha->hw;
|
|
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
|
|
index 21be50b35bc27..4af794c46d175 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_os.c
|
|
+++ b/drivers/scsi/qla2xxx/qla_os.c
|
|
@@ -14,6 +14,7 @@
|
|
#include <linux/slab.h>
|
|
#include <linux/blk-mq-pci.h>
|
|
#include <linux/refcount.h>
|
|
+#include <linux/crash_dump.h>
|
|
|
|
#include <scsi/scsi_tcq.h>
|
|
#include <scsi/scsicam.h>
|
|
@@ -2828,6 +2829,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|
return ret;
|
|
}
|
|
|
|
+ if (is_kdump_kernel()) {
|
|
+ ql2xmqsupport = 0;
|
|
+ ql2xallocfwdump = 0;
|
|
+ }
|
|
+
|
|
/* This may fail but that's ok */
|
|
pci_enable_pcie_error_reporting(pdev);
|
|
|
|
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
index 5083e5d2b4675..de73ade70c24c 100644
|
|
--- a/drivers/scsi/smartpqi/smartpqi_init.c
|
|
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
|
|
@@ -1207,6 +1207,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
|
|
"Requested %d bytes, received %d bytes",
|
|
raid_map_size,
|
|
get_unaligned_le32(&raid_map->structure_size));
|
|
+ rc = -EINVAL;
|
|
goto error;
|
|
}
|
|
}
|
|
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
|
|
index f54b494ca4486..3f4f3d6f48f9f 100644
|
|
--- a/drivers/scsi/ufs/ufs-exynos.c
|
|
+++ b/drivers/scsi/ufs/ufs-exynos.c
|
|
@@ -259,7 +259,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
|
|
struct ufs_hba *hba = ufs->hba;
|
|
struct list_head *head = &hba->clk_list_head;
|
|
struct ufs_clk_info *clki;
|
|
- u32 pclk_rate;
|
|
+ unsigned long pclk_rate;
|
|
u32 f_min, f_max;
|
|
u8 div = 0;
|
|
int ret = 0;
|
|
@@ -298,7 +298,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
|
|
}
|
|
|
|
if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
|
|
- dev_err(hba->dev, "not available pclk range %d\n", pclk_rate);
|
|
+ dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
|
|
ret = -EINVAL;
|
|
goto out;
|
|
}
|
|
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
|
|
index 76d6e39efb2f0..541b577c371ce 100644
|
|
--- a/drivers/scsi/ufs/ufs-exynos.h
|
|
+++ b/drivers/scsi/ufs/ufs-exynos.h
|
|
@@ -197,7 +197,7 @@ struct exynos_ufs {
|
|
u32 pclk_div;
|
|
u32 pclk_avail_min;
|
|
u32 pclk_avail_max;
|
|
- u32 mclk_rate;
|
|
+ unsigned long mclk_rate;
|
|
int avail_ln_rx;
|
|
int avail_ln_tx;
|
|
int rx_sel_idx;
|
|
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
|
|
index 854c96e630077..4dabd09400c6d 100644
|
|
--- a/drivers/scsi/ufs/ufshcd.c
|
|
+++ b/drivers/scsi/ufs/ufshcd.c
|
|
@@ -3249,9 +3249,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
|
|
|
|
if (is_kmalloc) {
|
|
/* Make sure we don't copy more data than available */
|
|
- if (param_offset + param_size > buff_len)
|
|
- param_size = buff_len - param_offset;
|
|
- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
|
|
+ if (param_offset >= buff_len)
|
|
+ ret = -EINVAL;
|
|
+ else
|
|
+ memcpy(param_read_buf, &desc_buf[param_offset],
|
|
+ min_t(u32, param_size, buff_len - param_offset));
|
|
}
|
|
out:
|
|
if (is_kmalloc)
|
|
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
|
|
index 01ed21e8bfee5..040c7dc1d4792 100644
|
|
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
|
|
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
|
|
@@ -46,7 +46,7 @@ static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
|
|
unsigned long vsize = vma->vm_end - vma->vm_start;
|
|
pgprot_t prot = vma->vm_page_prot;
|
|
|
|
- if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
|
|
+ if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT)
|
|
return -EINVAL;
|
|
|
|
/* ast2400/2500 AHB accesses are not cache coherent */
|
|
diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
|
|
index b60fbeaffcbd0..20b5fb2a207cc 100644
|
|
--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
|
|
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
|
|
@@ -110,7 +110,7 @@ static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
|
|
vsize = vma->vm_end - vma->vm_start;
|
|
prot = vma->vm_page_prot;
|
|
|
|
- if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
|
|
+ if (vma->vm_pgoff + vma_pages(vma) > ctrl->mem_size >> PAGE_SHIFT)
|
|
return -EINVAL;
|
|
|
|
/* ast2400/2500 AHB accesses are not cache coherent */
|
|
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
|
|
index ed2c687c16b31..4fe88d4690e2b 100644
|
|
--- a/drivers/soc/qcom/qcom_aoss.c
|
|
+++ b/drivers/soc/qcom/qcom_aoss.c
|
|
@@ -476,12 +476,12 @@ static int qmp_cooling_device_add(struct qmp *qmp,
|
|
static int qmp_cooling_devices_register(struct qmp *qmp)
|
|
{
|
|
struct device_node *np, *child;
|
|
- int count = QMP_NUM_COOLING_RESOURCES;
|
|
+ int count = 0;
|
|
int ret;
|
|
|
|
np = qmp->dev->of_node;
|
|
|
|
- qmp->cooling_devs = devm_kcalloc(qmp->dev, count,
|
|
+ qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
|
|
sizeof(*qmp->cooling_devs),
|
|
GFP_KERNEL);
|
|
|
|
@@ -497,12 +497,16 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
|
|
goto unroll;
|
|
}
|
|
|
|
+ if (!count)
|
|
+ devm_kfree(qmp->dev, qmp->cooling_devs);
|
|
+
|
|
return 0;
|
|
|
|
unroll:
|
|
while (--count >= 0)
|
|
thermal_cooling_device_unregister
|
|
(qmp->cooling_devs[count].cdev);
|
|
+ devm_kfree(qmp->dev, qmp->cooling_devs);
|
|
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
|
|
index 6a1e862b16c38..dad4326a2a714 100644
|
|
--- a/drivers/soundwire/intel.c
|
|
+++ b/drivers/soundwire/intel.c
|
|
@@ -537,12 +537,14 @@ static int intel_link_power_down(struct sdw_intel *sdw)
|
|
|
|
mutex_lock(sdw->link_res->shim_lock);
|
|
|
|
- intel_shim_master_ip_to_glue(sdw);
|
|
-
|
|
if (!(*shim_mask & BIT(link_id)))
|
|
dev_err(sdw->cdns.dev,
|
|
"%s: Unbalanced power-up/down calls\n", __func__);
|
|
|
|
+ sdw->cdns.link_up = false;
|
|
+
|
|
+ intel_shim_master_ip_to_glue(sdw);
|
|
+
|
|
*shim_mask &= ~BIT(link_id);
|
|
|
|
if (!*shim_mask) {
|
|
@@ -559,20 +561,21 @@ static int intel_link_power_down(struct sdw_intel *sdw)
|
|
link_control &= spa_mask;
|
|
|
|
ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
|
|
+ if (ret < 0) {
|
|
+ dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
|
|
+
|
|
+ /*
|
|
+ * we leave the sdw->cdns.link_up flag as false since we've disabled
|
|
+ * the link at this point and cannot handle interrupts any longer.
|
|
+ */
|
|
+ }
|
|
}
|
|
|
|
link_control = intel_readl(shim, SDW_SHIM_LCTL);
|
|
|
|
mutex_unlock(sdw->link_res->shim_lock);
|
|
|
|
- if (ret < 0) {
|
|
- dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
|
|
-
|
|
- return ret;
|
|
- }
|
|
-
|
|
- sdw->cdns.link_up = false;
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static void intel_shim_sync_arm(struct sdw_intel *sdw)
|
|
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index cb6feb34dd401..f980af0373452 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -136,6 +136,7 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
 static int board_staging_add_dev_domain(struct platform_device *pdev,
 					 const char *domain)
 {
+	struct device *dev = &pdev->dev;
 	struct of_phandle_args pd_args;
 	struct device_node *np;

@@ -148,7 +149,11 @@ static int board_staging_add_dev_domain(struct platform_device *pdev,
 	pd_args.np = np;
 	pd_args.args_count = 0;

-	return of_genpd_add_device(&pd_args, &pdev->dev);
+	/* Initialization similar to device_pm_init_common() */
+	spin_lock_init(&dev->power.lock);
+	dev->power.early_init = true;
+
+	return of_genpd_add_device(&pd_args, dev);
 }
 #else
 static inline int board_staging_add_dev_domain(struct platform_device *pdev,
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 78dc8beeae98e..8c740c771f509 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -939,9 +939,9 @@ static void ks7010_private_init(struct ks_wlan_private *priv,
 	memset(&priv->wstats, 0, sizeof(priv->wstats));

 	/* sleep mode */
+	atomic_set(&priv->sleepstatus.status, 0);
 	atomic_set(&priv->sleepstatus.doze_request, 0);
 	atomic_set(&priv->sleepstatus.wakeup_request, 0);
-	atomic_set(&priv->sleepstatus.wakeup_request, 0);

 	trx_device_init(priv);
 	hostif_init(priv);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 0295e2e32d797..fa1bd99cd6f17 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -1763,7 +1763,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
 	if (err < 0)
 		goto register_entities_fail;
 	/* init atomisp wdts */
-	if (init_atomisp_wdts(isp) != 0)
+	err = init_atomisp_wdts(isp);
+	if (err != 0)
 		goto wdt_work_queue_fail;

 	/* save the iunit context only once after all the values are init'ed. */
@@ -1815,6 +1816,7 @@ request_irq_fail:
 	hmm_cleanup();
 	hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
 hmm_pool_fail:
+	pm_runtime_get_noresume(&pdev->dev);
 	destroy_workqueue(isp->wdt_work_queue);
 wdt_work_queue_fail:
 	atomisp_acc_cleanup(isp);
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index a5cdf150cd16c..d30bdc678cc24 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -377,12 +377,17 @@ static void cfg_ref(struct hantro_ctx *ctx,
 	vb2_dst = hantro_get_dst_buf(ctx);

 	ref = hantro_get_ref(ctx, hdr->last_frame_ts);
-	if (!ref)
+	if (!ref) {
+		vpu_debug(0, "failed to find last frame ts=%llu\n",
+			  hdr->last_frame_ts);
 		ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+	}
 	vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));

 	ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
-	WARN_ON(!ref && hdr->golden_frame_ts);
+	if (!ref && hdr->golden_frame_ts)
+		vpu_debug(0, "failed to find golden frame ts=%llu\n",
+			  hdr->golden_frame_ts);
 	if (!ref)
 		ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
 	if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN)
@@ -390,7 +395,9 @@ static void cfg_ref(struct hantro_ctx *ctx,
 	vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));

 	ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
-	WARN_ON(!ref && hdr->alt_frame_ts);
+	if (!ref && hdr->alt_frame_ts)
+		vpu_debug(0, "failed to find alt frame ts=%llu\n",
+			  hdr->alt_frame_ts);
 	if (!ref)
 		ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
 	if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT)
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
index a4a792f00b111..5b8c8fc49cce8 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -454,12 +454,17 @@ static void cfg_ref(struct hantro_ctx *ctx,
 	vb2_dst = hantro_get_dst_buf(ctx);

 	ref = hantro_get_ref(ctx, hdr->last_frame_ts);
-	if (!ref)
+	if (!ref) {
+		vpu_debug(0, "failed to find last frame ts=%llu\n",
+			  hdr->last_frame_ts);
 		ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+	}
 	vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);

 	ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
-	WARN_ON(!ref && hdr->golden_frame_ts);
+	if (!ref && hdr->golden_frame_ts)
+		vpu_debug(0, "failed to find golden frame ts=%llu\n",
+			  hdr->golden_frame_ts);
 	if (!ref)
 		ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
 	if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN)
@@ -467,7 +472,9 @@ static void cfg_ref(struct hantro_ctx *ctx,
 	vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));

 	ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
-	WARN_ON(!ref && hdr->alt_frame_ts);
+	if (!ref && hdr->alt_frame_ts)
+		vpu_debug(0, "failed to find alt frame ts=%llu\n",
+			  hdr->alt_frame_ts);
 	if (!ref)
 		ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
 	if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT)
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
|
|
index 1deb74112ad43..11d9d9155eef2 100644
|
|
--- a/drivers/staging/rts5208/rtsx_scsi.c
|
|
+++ b/drivers/staging/rts5208/rtsx_scsi.c
|
|
@@ -2802,10 +2802,10 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
|
|
}
|
|
|
|
if (dev_info_id == 0x15) {
|
|
- buf_len = 0x3A;
|
|
+ buf_len = 0x3C;
|
|
data_len = 0x3A;
|
|
} else {
|
|
- buf_len = 0x6A;
|
|
+ buf_len = 0x6C;
|
|
data_len = 0x6A;
|
|
}
|
|
|
|
@@ -2855,11 +2855,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
|
|
}
|
|
|
|
rtsx_stor_set_xfer_buf(buf, buf_len, srb);
|
|
-
|
|
- if (dev_info_id == 0x15)
|
|
- scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
|
|
- else
|
|
- scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
|
|
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
|
|
|
|
kfree(buf);
|
|
return STATUS_SUCCESS;
|
|
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 9a272a516b2d7..c4b157c29af7a 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2204,7 +2204,7 @@ static void tb_switch_default_link_ports(struct tb_switch *sw)
 {
 	int i;

-	for (i = 1; i <= sw->config.max_port_number; i += 2) {
+	for (i = 1; i <= sw->config.max_port_number; i++) {
 		struct tb_port *port = &sw->ports[i];
 		struct tb_port *subordinate;

diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index e8c58f9bd2632..d6afaae1729aa 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -1038,7 +1038,7 @@ static const struct tty_operations hvsi_ops = {

 static int __init hvsi_init(void)
 {
-	int i;
+	int i, ret;

 	hvsi_driver = alloc_tty_driver(hvsi_count);
 	if (!hvsi_driver)
@@ -1069,12 +1069,25 @@ static int __init hvsi_init(void)
 	}
 	hvsi_wait = wait_for_state; /* irqs active now */

-	if (tty_register_driver(hvsi_driver))
-		panic("Couldn't register hvsi console driver\n");
+	ret = tty_register_driver(hvsi_driver);
+	if (ret) {
+		pr_err("Couldn't register hvsi console driver\n");
+		goto err_free_irq;
+	}

 	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);

 	return 0;
+err_free_irq:
+	hvsi_wait = poll_for_state;
+	for (i = 0; i < hvsi_count; i++) {
+		struct hvsi_struct *hp = &hvsi_ports[i];
+
+		free_irq(hp->virq, hp);
+	}
+	tty_driver_kref_put(hvsi_driver);
+
+	return ret;
 }
 device_initcall(hvsi_init);

diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index c37468887fd2a..efe4cf32add2c 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -617,7 +617,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
 	struct uart_port *port = dev_id;
 	struct omap8250_priv *priv = port->private_data;
 	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned int iir;
+	unsigned int iir, lsr;
 	int ret;

 #ifdef CONFIG_SERIAL_8250_DMA
@@ -628,6 +628,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
 #endif

 	serial8250_rpm_get(up);
+	lsr = serial_port_in(port, UART_LSR);
 	iir = serial_port_in(port, UART_IIR);
 	ret = serial8250_handle_irq(port, iir);

@@ -642,6 +643,24 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
 		serial_port_in(port, UART_RX);
 	}

+	/* Stop processing interrupts on input overrun */
+	if ((lsr & UART_LSR_OE) && up->overrun_backoff_time_ms > 0) {
+		unsigned long delay;
+
+		up->ier = port->serial_in(port, UART_IER);
+		if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
+			port->ops->stop_rx(port);
+		} else {
+			/* Keep restarting the timer until
+			 * the input overrun subsides.
+			 */
+			cancel_delayed_work(&up->overrun_backoff);
+		}
+
+		delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
+		schedule_delayed_work(&up->overrun_backoff, delay);
+	}
+
 	serial8250_rpm_put(up);

 	return IRQ_RETVAL(ret);
@@ -1353,6 +1372,10 @@ static int omap8250_probe(struct platform_device *pdev)
 		}
 	}

+	if (of_property_read_u32(np, "overrun-throttle-ms",
+				 &up.overrun_backoff_time_ms) != 0)
+		up.overrun_backoff_time_ms = 0;
+
 	priv->wakeirq = irq_of_parse_and_map(np, 1);

 	pdata = of_device_get_match_data(&pdev->dev);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 39f9ea24e3169..58f718ed1bb98 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -87,7 +87,7 @@ static void moan_device(const char *str, struct pci_dev *dev)

 static int
 setup_port(struct serial_private *priv, struct uart_8250_port *port,
-	   int bar, int offset, int regshift)
+	   u8 bar, unsigned int offset, int regshift)
 {
 	struct pci_dev *dev = priv->dev;

diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 3de0a16e055a3..5d40f1010fbfd 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -122,7 +122,8 @@ static const struct serial8250_config uart_config[] = {
 		.name		= "16C950/954",
 		.fifo_size	= 128,
 		.tx_loadsz	= 128,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
+		.rxtrig_bytes	= {16, 32, 112, 120},
 		/* UART_CAP_EFR breaks billionon CF bluetooth card. */
 		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP,
 	},
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index bf0e2a4cb0cef..c6f927a76c3be 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -815,7 +815,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
 		/* Parse any modem signal changes */
 		jsm_dbg(INTR, &ch->ch_bd->pci_dev,
 			"MOD_STAT: sending to parse_modem_sigs\n");
+		spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
 		neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+		spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
 	}
 }

diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 689774c073ca4..8438454ca653f 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -187,6 +187,7 @@ static void jsm_tty_break(struct uart_port *port, int break_state)

 static int jsm_tty_open(struct uart_port *port)
 {
+	unsigned long lock_flags;
 	struct jsm_board *brd;
 	struct jsm_channel *channel =
 		container_of(port, struct jsm_channel, uart_port);
@@ -240,6 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
 	channel->ch_cached_lsr = 0;
 	channel->ch_stops_sent = 0;

+	spin_lock_irqsave(&port->lock, lock_flags);
 	termios = &port->state->port.tty->termios;
 	channel->ch_c_cflag	= termios->c_cflag;
 	channel->ch_c_iflag	= termios->c_iflag;
@@ -259,6 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
 	jsm_carrier(channel);

 	channel->ch_open_count++;
+	spin_unlock_irqrestore(&port->lock, lock_flags);

 	jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
 	return 0;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 70898a999a498..f700bfaef1293 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1760,6 +1760,10 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)

 	/* Handle BREAKs */
 	sci_handle_breaks(port);
+
+	/* drop invalid character received before break was detected */
+	serial_port_in(port, SCxRDR);
+
 	sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));

 	return IRQ_HANDLED;
@@ -1839,7 +1843,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
 		ret = sci_er_interrupt(irq, ptr);

 	/* Break Interrupt */
-	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
+	if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] &&
+	    (ssr_status & SCxSR_BRK(port)) && err_enabled)
 		ret = sci_br_interrupt(irq, ptr);

 	/* Overrun Interrupt */
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
|
|
index 48e4a5ca18359..f5f56ee07729f 100644
|
|
--- a/drivers/usb/chipidea/host.c
|
|
+++ b/drivers/usb/chipidea/host.c
|
|
@@ -233,18 +233,26 @@ static int ci_ehci_hub_control(
|
|
)
|
|
{
|
|
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
|
|
+ unsigned int ports = HCS_N_PORTS(ehci->hcs_params);
|
|
u32 __iomem *status_reg;
|
|
- u32 temp;
|
|
+ u32 temp, port_index;
|
|
unsigned long flags;
|
|
int retval = 0;
|
|
struct device *dev = hcd->self.controller;
|
|
struct ci_hdrc *ci = dev_get_drvdata(dev);
|
|
|
|
- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
|
|
+ port_index = wIndex & 0xff;
|
|
+ port_index -= (port_index > 0);
|
|
+ status_reg = &ehci->regs->port_status[port_index];
|
|
|
|
spin_lock_irqsave(&ehci->lock, flags);
|
|
|
|
if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
|
|
+ if (!wIndex || wIndex > ports) {
|
|
+ retval = -EPIPE;
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
temp = ehci_readl(ehci, status_reg);
|
|
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
|
|
retval = -EPIPE;
|
|
@@ -273,7 +281,7 @@ static int ci_ehci_hub_control(
|
|
ehci_writel(ehci, temp, status_reg);
|
|
}
|
|
|
|
- set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
|
|
+ set_bit(port_index, &ehci->suspended_ports);
|
|
goto done;
|
|
}
|
|
|
|
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
|
|
index 1a556a628971f..3ffa939678d77 100644
|
|
--- a/drivers/usb/gadget/composite.c
|
|
+++ b/drivers/usb/gadget/composite.c
|
|
@@ -481,7 +481,7 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
|
|
{
|
|
unsigned val;
|
|
|
|
- if (c->MaxPower)
|
|
+ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
|
|
val = c->MaxPower;
|
|
else
|
|
val = CONFIG_USB_GADGET_VBUS_DRAW;
|
|
@@ -905,7 +905,11 @@ static int set_config(struct usb_composite_dev *cdev,
|
|
}
|
|
|
|
/* when we return, be sure our power usage is valid */
|
|
- power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
|
|
+ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
|
|
+ power = c->MaxPower;
|
|
+ else
|
|
+ power = CONFIG_USB_GADGET_VBUS_DRAW;
|
|
+
|
|
if (gadget->speed < USB_SPEED_SUPER)
|
|
power = min(power, 500U);
|
|
else
|
|
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
|
|
index c019f2b0c0af3..a9cb647bac6fb 100644
|
|
--- a/drivers/usb/gadget/function/u_ether.c
|
|
+++ b/drivers/usb/gadget/function/u_ether.c
|
|
@@ -491,8 +491,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
|
|
}
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
|
|
|
- if (skb && !in) {
|
|
- dev_kfree_skb_any(skb);
|
|
+ if (!in) {
|
|
+ if (skb)
|
|
+ dev_kfree_skb_any(skb);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
|
|
index cffdc8d01b2a8..8fd27249ad257 100644
|
|
--- a/drivers/usb/host/ehci-mv.c
|
|
+++ b/drivers/usb/host/ehci-mv.c
|
|
@@ -42,26 +42,25 @@ struct ehci_hcd_mv {
|
|
int (*set_vbus)(unsigned int vbus);
|
|
};
|
|
|
|
-static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
|
|
+static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
|
|
{
|
|
- clk_prepare_enable(ehci_mv->clk);
|
|
-}
|
|
+ int retval;
|
|
|
|
-static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
|
|
-{
|
|
- clk_disable_unprepare(ehci_mv->clk);
|
|
-}
|
|
+ retval = clk_prepare_enable(ehci_mv->clk);
|
|
+ if (retval)
|
|
+ return retval;
|
|
|
|
-static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
|
|
-{
|
|
- ehci_clock_enable(ehci_mv);
|
|
- return phy_init(ehci_mv->phy);
|
|
+ retval = phy_init(ehci_mv->phy);
|
|
+ if (retval)
|
|
+ clk_disable_unprepare(ehci_mv->clk);
|
|
+
|
|
+ return retval;
|
|
}
|
|
|
|
static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
|
|
{
|
|
phy_exit(ehci_mv->phy);
|
|
- ehci_clock_disable(ehci_mv);
|
|
+ clk_disable_unprepare(ehci_mv->clk);
|
|
}
|
|
|
|
static int mv_ehci_reset(struct usb_hcd *hcd)
|
|
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
|
|
index bd958f059fe64..ff0b3457fd342 100644
|
|
--- a/drivers/usb/host/fotg210-hcd.c
|
|
+++ b/drivers/usb/host/fotg210-hcd.c
|
|
@@ -2509,11 +2509,6 @@ retry_xacterr:
|
|
return count;
|
|
}
|
|
|
|
-/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
|
|
-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
|
|
-/* ... and packet size, for any kind of endpoint descriptor */
|
|
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
|
|
-
|
|
/* reverse of qh_urb_transaction: free a list of TDs.
|
|
* used for cleanup after errors, before HC sees an URB's TDs.
|
|
*/
|
|
@@ -2599,7 +2594,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
|
|
token |= (1 /* "in" */ << 8);
|
|
/* else it's already initted to "out" pid (0 << 8) */
|
|
|
|
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
|
|
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
|
|
|
|
/*
|
|
* buffer gets wrapped in one or more qtds;
|
|
@@ -2713,9 +2708,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
|
|
gfp_t flags)
|
|
{
|
|
struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
|
|
+ struct usb_host_endpoint *ep;
|
|
u32 info1 = 0, info2 = 0;
|
|
int is_input, type;
|
|
int maxp = 0;
|
|
+ int mult;
|
|
struct usb_tt *tt = urb->dev->tt;
|
|
struct fotg210_qh_hw *hw;
|
|
|
|
@@ -2730,14 +2727,15 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
|
|
|
|
is_input = usb_pipein(urb->pipe);
|
|
type = usb_pipetype(urb->pipe);
|
|
- maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
|
|
+ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
|
|
+ maxp = usb_endpoint_maxp(&ep->desc);
|
|
+ mult = usb_endpoint_maxp_mult(&ep->desc);
|
|
|
|
/* 1024 byte maxpacket is a hardware ceiling. High bandwidth
|
|
* acts like up to 3KB, but is built from smaller packets.
|
|
*/
|
|
- if (max_packet(maxp) > 1024) {
|
|
- fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
|
|
- max_packet(maxp));
|
|
+ if (maxp > 1024) {
|
|
+ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
|
|
goto done;
|
|
}
|
|
|
|
@@ -2751,8 +2749,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
|
|
*/
|
|
if (type == PIPE_INTERRUPT) {
|
|
qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
|
|
- is_input, 0,
|
|
- hb_mult(maxp) * max_packet(maxp)));
|
|
+ is_input, 0, mult * maxp));
|
|
qh->start = NO_FRAME;
|
|
|
|
if (urb->dev->speed == USB_SPEED_HIGH) {
|
|
@@ -2789,7 +2786,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
|
|
think_time = tt ? tt->think_time : 0;
|
|
qh->tt_usecs = NS_TO_US(think_time +
|
|
usb_calc_bus_time(urb->dev->speed,
|
|
- is_input, 0, max_packet(maxp)));
|
|
+ is_input, 0, maxp));
|
|
qh->period = urb->interval;
|
|
if (qh->period > fotg210->periodic_size) {
|
|
qh->period = fotg210->periodic_size;
|
|
@@ -2852,11 +2849,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
|
|
* to help them do so. So now people expect to use
|
|
* such nonconformant devices with Linux too; sigh.
|
|
*/
|
|
- info1 |= max_packet(maxp) << 16;
|
|
+ info1 |= maxp << 16;
|
|
info2 |= (FOTG210_TUNE_MULT_HS << 30);
|
|
} else { /* PIPE_INTERRUPT */
|
|
- info1 |= max_packet(maxp) << 16;
|
|
- info2 |= hb_mult(maxp) << 30;
|
|
+ info1 |= maxp << 16;
|
|
+ info2 |= mult << 30;
|
|
}
|
|
break;
|
|
default:
|
|
@@ -3926,6 +3923,7 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
|
|
int is_input;
|
|
long bandwidth;
|
|
unsigned multi;
|
|
+ struct usb_host_endpoint *ep;
|
|
|
|
/*
|
|
* this might be a "high bandwidth" highspeed endpoint,
|
|
@@ -3933,14 +3931,14 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
|
|
*/
|
|
epnum = usb_pipeendpoint(pipe);
|
|
is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
|
|
- maxp = usb_maxpacket(dev, pipe, !is_input);
|
|
+ ep = usb_pipe_endpoint(dev, pipe);
|
|
+ maxp = usb_endpoint_maxp(&ep->desc);
|
|
if (is_input)
|
|
buf1 = (1 << 11);
|
|
else
|
|
buf1 = 0;
|
|
|
|
- maxp = max_packet(maxp);
|
|
- multi = hb_mult(maxp);
|
|
+ multi = usb_endpoint_maxp_mult(&ep->desc);
|
|
buf1 |= maxp;
|
|
maxp *= multi;
|
|
|
|
@@ -4461,13 +4459,12 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
|
|
|
|
/* HC need not update length with this error */
|
|
if (!(t & FOTG210_ISOC_BABBLE)) {
|
|
- desc->actual_length =
|
|
- fotg210_itdlen(urb, desc, t);
|
|
+ desc->actual_length = FOTG210_ITD_LENGTH(t);
|
|
urb->actual_length += desc->actual_length;
|
|
}
|
|
} else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
|
|
desc->status = 0;
|
|
- desc->actual_length = fotg210_itdlen(urb, desc, t);
|
|
+ desc->actual_length = FOTG210_ITD_LENGTH(t);
|
|
urb->actual_length += desc->actual_length;
|
|
} else {
|
|
/* URB was too late */
|
|
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
|
|
index 6cee40ec65b41..67f59517ebade 100644
|
|
--- a/drivers/usb/host/fotg210.h
|
|
+++ b/drivers/usb/host/fotg210.h
|
|
@@ -686,11 +686,6 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
|
|
return fotg210_readl(fotg210, &fotg210->regs->frame_index);
|
|
}
|
|
|
|
-#define fotg210_itdlen(urb, desc, t) ({ \
|
|
- usb_pipein((urb)->pipe) ? \
|
|
- (desc)->length - FOTG210_ITD_LENGTH(t) : \
|
|
- FOTG210_ITD_LENGTH(t); \
|
|
-})
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
#endif /* __LINUX_FOTG210_H */
|
|
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
|
|
index a8d97e23f601f..c51391b45207e 100644
|
|
--- a/drivers/usb/host/xhci.c
|
|
+++ b/drivers/usb/host/xhci.c
|
|
@@ -4666,19 +4666,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
|
|
{
|
|
unsigned long long timeout_ns;
|
|
|
|
- if (xhci->quirks & XHCI_INTEL_HOST)
|
|
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
|
|
- else
|
|
- timeout_ns = udev->u1_params.sel;
|
|
-
|
|
/* Prevent U1 if service interval is shorter than U1 exit latency */
|
|
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
|
|
- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
|
|
+ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
|
|
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
|
|
return USB3_LPM_DISABLED;
|
|
}
|
|
}
|
|
|
|
+ if (xhci->quirks & XHCI_INTEL_HOST)
|
|
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
|
|
+ else
|
|
+ timeout_ns = udev->u1_params.sel;
|
|
+
|
|
/* The U1 timeout is encoded in 1us intervals.
|
|
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
|
|
*/
|
|
@@ -4730,19 +4730,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
|
|
{
|
|
unsigned long long timeout_ns;
|
|
|
|
- if (xhci->quirks & XHCI_INTEL_HOST)
|
|
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
|
|
- else
|
|
- timeout_ns = udev->u2_params.sel;
|
|
-
|
|
/* Prevent U2 if service interval is shorter than U2 exit latency */
|
|
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
|
|
- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
|
|
+ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
|
|
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
|
|
return USB3_LPM_DISABLED;
|
|
}
|
|
}
|
|
|
|
+ if (xhci->quirks & XHCI_INTEL_HOST)
|
|
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
|
|
+ else
|
|
+ timeout_ns = udev->u2_params.sel;
|
|
+
|
|
/* The U2 timeout is encoded in 256us intervals */
|
|
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
|
|
/* If the necessary timeout value is bigger than what we can set in the
|
|
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
|
|
index 5892f3ce0cdc8..ce9fc46c92661 100644
|
|
--- a/drivers/usb/musb/musb_dsps.c
|
|
+++ b/drivers/usb/musb/musb_dsps.c
|
|
@@ -890,23 +890,22 @@ static int dsps_probe(struct platform_device *pdev)
|
|
if (!glue->usbss_base)
|
|
return -ENXIO;
|
|
|
|
- if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
|
|
- ret = dsps_setup_optional_vbus_irq(pdev, glue);
|
|
- if (ret)
|
|
- goto err_iounmap;
|
|
- }
|
|
-
|
|
platform_set_drvdata(pdev, glue);
|
|
pm_runtime_enable(&pdev->dev);
|
|
ret = dsps_create_musb_pdev(glue, pdev);
|
|
if (ret)
|
|
goto err;
|
|
|
|
+ if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
|
|
+ ret = dsps_setup_optional_vbus_irq(pdev, glue);
|
|
+ if (ret)
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
return 0;
|
|
|
|
err:
|
|
pm_runtime_disable(&pdev->dev);
|
|
-err_iounmap:
|
|
iounmap(glue->usbss_base);
|
|
return ret;
|
|
}
|
|
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
|
|
index 4ba6bcdaa8e9d..b07b2925ff78b 100644
|
|
--- a/drivers/usb/usbip/vhci_hcd.c
|
|
+++ b/drivers/usb/usbip/vhci_hcd.c
|
|
@@ -455,8 +455,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
|
|
vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET);
|
|
vhci_hcd->re_timeout = 0;
|
|
|
|
+ /*
|
|
+ * A few drivers do usb reset during probe when
|
|
+ * the device could be in VDEV_ST_USED state
|
|
+ */
|
|
if (vhci_hcd->vdev[rhport].ud.status ==
|
|
- VDEV_ST_NOTASSIGNED) {
|
|
+ VDEV_ST_NOTASSIGNED ||
|
|
+ vhci_hcd->vdev[rhport].ud.status ==
|
|
+ VDEV_ST_USED) {
|
|
usbip_dbg_vhci_rh(
|
|
" enable rhport %d (status %u)\n",
|
|
rhport,
|
|
@@ -957,8 +963,32 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
|
|
spin_lock(&vdev->priv_lock);
|
|
|
|
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
|
|
+ struct urb *urb;
|
|
+
|
|
+ /* give back urb of unsent unlink request */
|
|
pr_info("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
|
|
+
|
|
+ urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
|
|
+ if (!urb) {
|
|
+ list_del(&unlink->list);
|
|
+ kfree(unlink);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ urb->status = -ENODEV;
|
|
+
|
|
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
|
|
+
|
|
list_del(&unlink->list);
|
|
+
|
|
+ spin_unlock(&vdev->priv_lock);
|
|
+ spin_unlock_irqrestore(&vhci->lock, flags);
|
|
+
|
|
+ usb_hcd_giveback_urb(hcd, urb, urb->status);
|
|
+
|
|
+ spin_lock_irqsave(&vhci->lock, flags);
|
|
+ spin_lock(&vdev->priv_lock);
|
|
+
|
|
kfree(unlink);
|
|
}
|
|
|
|
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
|
|
index 67d0bf4efa160..e44bf736e2b22 100644
|
|
--- a/drivers/vfio/Kconfig
|
|
+++ b/drivers/vfio/Kconfig
|
|
@@ -29,7 +29,7 @@ menuconfig VFIO
|
|
|
|
If you don't know what to do here, say N.
|
|
|
|
-menuconfig VFIO_NOIOMMU
|
|
+config VFIO_NOIOMMU
|
|
bool "VFIO No-IOMMU support"
|
|
depends on VFIO
|
|
help
|
|
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
|
|
index 3e006da477523..84c56f525889f 100644
|
|
--- a/drivers/video/fbdev/asiliantfb.c
|
|
+++ b/drivers/video/fbdev/asiliantfb.c
|
|
@@ -227,6 +227,9 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var,
|
|
{
|
|
unsigned long Ftarget, ratio, remainder;
|
|
|
|
+ if (!var->pixclock)
|
|
+ return -EINVAL;
|
|
+
|
|
ratio = 1000000 / var->pixclock;
|
|
remainder = 1000000 % var->pixclock;
|
|
Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock;
|
|
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
|
|
index 8fbde92ae8b9c..25801e8e3f74a 100644
|
|
--- a/drivers/video/fbdev/kyro/fbdev.c
|
|
+++ b/drivers/video/fbdev/kyro/fbdev.c
|
|
@@ -372,6 +372,11 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight
|
|
/* probably haven't called CreateOverlay yet */
|
|
return -EINVAL;
|
|
|
|
+ if (ulWidth == 0 || ulWidth == 0xffffffff ||
|
|
+ ulHeight == 0 || ulHeight == 0xffffffff ||
|
|
+ (x < 2 && ulWidth + 2 == 0))
|
|
+ return -EINVAL;
|
|
+
|
|
/* Stop Ramdac Output */
|
|
DisableRamdacOutput(deviceInfo.pSTGReg);
|
|
|
|
@@ -394,6 +399,9 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
|
|
{
|
|
struct kyrofb_info *par = info->par;
|
|
|
|
+ if (!var->pixclock)
|
|
+ return -EINVAL;
|
|
+
|
|
if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
|
|
printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel);
|
|
return -EINVAL;
|
|
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
|
|
index ce55b9d2e862b..7dd621c7afe4c 100644
|
|
--- a/drivers/video/fbdev/riva/fbdev.c
|
|
+++ b/drivers/video/fbdev/riva/fbdev.c
|
|
@@ -1084,6 +1084,9 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
|
|
int mode_valid = 0;
|
|
|
|
NVTRACE_ENTER();
|
|
+ if (!var->pixclock)
|
|
+ return -EINVAL;
|
|
+
|
|
switch (var->bits_per_pixel) {
|
|
case 1 ... 8:
|
|
var->red.offset = var->green.offset = var->blue.offset = 0;
|
|
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
|
|
index 69c6786a9fdf2..ff3f0638cdb90 100644
|
|
--- a/fs/btrfs/inode.c
|
|
+++ b/fs/btrfs/inode.c
|
|
@@ -1202,11 +1202,6 @@ static noinline void async_cow_submit(struct btrfs_work *work)
|
|
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
|
|
PAGE_SHIFT;
|
|
|
|
- /* atomic_sub_return implies a barrier */
|
|
- if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
|
|
- 5 * SZ_1M)
|
|
- cond_wake_up_nomb(&fs_info->async_submit_wait);
|
|
-
|
|
/*
|
|
* ->inode could be NULL if async_chunk_start has failed to compress,
|
|
* in which case we don't have anything to submit, yet we need to
|
|
@@ -1215,6 +1210,11 @@ static noinline void async_cow_submit(struct btrfs_work *work)
|
|
*/
|
|
if (async_chunk->inode)
|
|
submit_compressed_extents(async_chunk);
|
|
+
|
|
+ /* atomic_sub_return implies a barrier */
|
|
+ if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
|
|
+ 5 * SZ_1M)
|
|
+ cond_wake_up_nomb(&fs_info->async_submit_wait);
|
|
}
|
|
|
|
static noinline void async_cow_free(struct btrfs_work *work)
|
|
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
|
|
index f36928efcf92d..ec25e5eab3499 100644
|
|
--- a/fs/btrfs/tree-log.c
|
|
+++ b/fs/btrfs/tree-log.c
|
|
@@ -708,7 +708,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
|
|
*/
|
|
ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
|
|
ins.offset);
|
|
- if (ret == 0) {
|
|
+ if (ret < 0) {
|
|
+ goto out;
|
|
+ } else if (ret == 0) {
|
|
btrfs_init_generic_ref(&ref,
|
|
BTRFS_ADD_DELAYED_REF,
|
|
ins.objectid, ins.offset, 0);
|
|
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
|
|
index d1fccddcf4035..b4fcc48f255b3 100644
|
|
--- a/fs/btrfs/volumes.c
|
|
+++ b/fs/btrfs/volumes.c
|
|
@@ -1129,6 +1129,9 @@ static void btrfs_close_one_device(struct btrfs_device *device)
|
|
fs_devices->rw_devices--;
|
|
}
|
|
|
|
+ if (device->devid == BTRFS_DEV_REPLACE_DEVID)
|
|
+ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
|
|
+
|
|
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
|
|
fs_devices->missing_devices--;
|
|
|
|
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
|
|
index b864c9b9e8df1..678dac8365ed3 100644
|
|
--- a/fs/ceph/caps.c
|
|
+++ b/fs/ceph/caps.c
|
|
@@ -1755,6 +1755,9 @@ struct ceph_cap_flush *ceph_alloc_cap_flush(void)
|
|
struct ceph_cap_flush *cf;
|
|
|
|
cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
|
|
+ if (!cf)
|
|
+ return NULL;
|
|
+
|
|
cf->is_capsnap = false;
|
|
return cf;
|
|
}
|
|
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
|
|
index 1a0298d1e7cda..d58c5ffeca0d9 100644
|
|
--- a/fs/cifs/sess.c
|
|
+++ b/fs/cifs/sess.c
|
|
@@ -888,7 +888,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct)
|
|
return 0;
|
|
|
|
out_free_smb_buf:
|
|
- kfree(smb_buf);
|
|
+ cifs_small_buf_release(smb_buf);
|
|
sess_data->iov[0].iov_base = NULL;
|
|
sess_data->iov[0].iov_len = 0;
|
|
sess_data->buf0_type = CIFS_NO_BUFFER;
|
|
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
|
|
index f94b13075ea47..30987ea011f1a 100644
|
|
--- a/fs/f2fs/compress.c
|
|
+++ b/fs/f2fs/compress.c
|
|
@@ -1308,12 +1308,6 @@ out_destroy_crypt:
|
|
|
|
for (--i; i >= 0; i--)
|
|
fscrypt_finalize_bounce_page(&cc->cpages[i]);
|
|
- for (i = 0; i < cc->nr_cpages; i++) {
|
|
- if (!cc->cpages[i])
|
|
- continue;
|
|
- f2fs_compress_free_page(cc->cpages[i]);
|
|
- cc->cpages[i] = NULL;
|
|
- }
|
|
out_put_cic:
|
|
kmem_cache_free(cic_entry_slab, cic);
|
|
out_put_dnode:
|
|
@@ -1324,6 +1318,12 @@ out_unlock_op:
|
|
else
|
|
f2fs_unlock_op(sbi);
|
|
out_free:
|
|
+ for (i = 0; i < cc->nr_cpages; i++) {
|
|
+ if (!cc->cpages[i])
|
|
+ continue;
|
|
+ f2fs_compress_free_page(cc->cpages[i]);
|
|
+ cc->cpages[i] = NULL;
|
|
+ }
|
|
page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
|
|
cc->cpages = NULL;
|
|
return -EAGAIN;
|
|
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
|
|
index cfae2dddb0bae..1b11a42847c48 100644
|
|
--- a/fs/f2fs/data.c
|
|
+++ b/fs/f2fs/data.c
|
|
@@ -1550,7 +1550,21 @@ next_dnode:
|
|
if (err) {
|
|
if (flag == F2FS_GET_BLOCK_BMAP)
|
|
map->m_pblk = 0;
|
|
+
|
|
if (err == -ENOENT) {
|
|
+ /*
|
|
+ * There is one exceptional case that read_node_page()
|
|
+ * may return -ENOENT due to filesystem has been
|
|
+ * shutdown or cp_error, so force to convert error
|
|
+ * number to EIO for such case.
|
|
+ */
|
|
+ if (map->m_may_create &&
|
|
+ (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
|
|
+ f2fs_cp_error(sbi))) {
|
|
+ err = -EIO;
|
|
+ goto unlock_out;
|
|
+ }
|
|
+
|
|
err = 0;
|
|
if (map->m_next_pgofs)
|
|
*map->m_next_pgofs =
|
|
@@ -2205,6 +2219,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
|
|
continue;
|
|
}
|
|
unlock_page(page);
|
|
+ if (for_write)
|
|
+ put_page(page);
|
|
cc->rpages[i] = NULL;
|
|
cc->nr_rpages--;
|
|
}
|
|
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
|
|
index 4b9ef8bbfa4a9..6694298b1660f 100644
|
|
--- a/fs/f2fs/dir.c
|
|
+++ b/fs/f2fs/dir.c
|
|
@@ -938,6 +938,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
|
|
struct blk_plug plug;
|
|
bool readdir_ra = sbi->readdir_ra == 1;
|
|
+ bool found_valid_dirent = false;
|
|
int err = 0;
|
|
|
|
bit_pos = ((unsigned long)ctx->pos % d->max);
|
|
@@ -952,13 +953,15 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
|
|
|
|
de = &d->dentry[bit_pos];
|
|
if (de->name_len == 0) {
|
|
+ if (found_valid_dirent || !bit_pos) {
|
|
+ printk_ratelimited(
|
|
+ "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
|
|
+ KERN_WARNING, sbi->sb->s_id,
|
|
+ le32_to_cpu(de->ino));
|
|
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
|
|
+ }
|
|
bit_pos++;
|
|
ctx->pos = start_pos + bit_pos;
|
|
- printk_ratelimited(
|
|
- "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
|
|
- KERN_WARNING, sbi->sb->s_id,
|
|
- le32_to_cpu(de->ino));
|
|
- set_sbi_flag(sbi, SBI_NEED_FSCK);
|
|
continue;
|
|
}
|
|
|
|
@@ -1001,6 +1004,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
|
|
f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
|
|
|
|
ctx->pos = start_pos + bit_pos;
|
|
+ found_valid_dirent = true;
|
|
}
|
|
out:
|
|
if (readdir_ra)
|
|
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
|
|
index 6ee8b1e0e1741..1fbaab1f7aba8 100644
|
|
--- a/fs/f2fs/file.c
|
|
+++ b/fs/f2fs/file.c
|
|
@@ -1080,7 +1080,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
|
|
}
|
|
|
|
if (pg_start < pg_end) {
|
|
- struct address_space *mapping = inode->i_mapping;
|
|
loff_t blk_start, blk_end;
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
|
|
@@ -1092,8 +1091,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
|
|
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
|
|
down_write(&F2FS_I(inode)->i_mmap_sem);
|
|
|
|
- truncate_inode_pages_range(mapping, blk_start,
|
|
- blk_end - 1);
|
|
+ truncate_pagecache_range(inode, blk_start, blk_end - 1);
|
|
|
|
f2fs_lock_op(sbi);
|
|
ret = f2fs_truncate_hole(inode, pg_start, pg_end);
|
|
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
|
|
index e02affb5c0e79..72f227f6ebad0 100644
|
|
--- a/fs/f2fs/gc.c
|
|
+++ b/fs/f2fs/gc.c
|
|
@@ -1477,8 +1477,10 @@ next_step:
|
|
int err;
|
|
|
|
if (S_ISREG(inode->i_mode)) {
|
|
- if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
|
|
+ if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
|
|
+ sbi->skipped_gc_rwsem++;
|
|
continue;
|
|
+ }
|
|
if (!down_write_trylock(
|
|
&fi->i_gc_rwsem[WRITE])) {
|
|
sbi->skipped_gc_rwsem++;
|
|
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
|
|
index 476b2c497d282..de543168b3708 100644
|
|
--- a/fs/f2fs/super.c
|
|
+++ b/fs/f2fs/super.c
|
|
@@ -2206,6 +2206,33 @@ static int f2fs_enable_quotas(struct super_block *sb)
|
|
return 0;
|
|
}
|
|
|
|
+static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
|
|
+{
|
|
+ struct quota_info *dqopt = sb_dqopt(sbi->sb);
|
|
+ struct address_space *mapping = dqopt->files[type]->i_mapping;
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = dquot_writeback_dquots(sbi->sb, type);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ ret = filemap_fdatawrite(mapping);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ /* if we are using journalled quota */
|
|
+ if (is_journalled_quota(sbi))
|
|
+ goto out;
|
|
+
|
|
+ ret = filemap_fdatawait(mapping);
|
|
+
|
|
+ truncate_inode_pages(&dqopt->files[type]->i_data, 0);
|
|
+out:
|
|
+ if (ret)
|
|
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
int f2fs_quota_sync(struct super_block *sb, int type)
|
|
{
|
|
struct f2fs_sb_info *sbi = F2FS_SB(sb);
|
|
@@ -2213,57 +2240,42 @@ int f2fs_quota_sync(struct super_block *sb, int type)
|
|
int cnt;
|
|
int ret;
|
|
|
|
- /*
|
|
- * do_quotactl
|
|
- * f2fs_quota_sync
|
|
- * down_read(quota_sem)
|
|
- * dquot_writeback_dquots()
|
|
- * f2fs_dquot_commit
|
|
- * block_operation
|
|
- * down_read(quota_sem)
|
|
- */
|
|
- f2fs_lock_op(sbi);
|
|
-
|
|
- down_read(&sbi->quota_sem);
|
|
- ret = dquot_writeback_dquots(sb, type);
|
|
- if (ret)
|
|
- goto out;
|
|
-
|
|
/*
|
|
* Now when everything is written we can discard the pagecache so
|
|
* that userspace sees the changes.
|
|
*/
|
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
|
- struct address_space *mapping;
|
|
|
|
if (type != -1 && cnt != type)
|
|
continue;
|
|
- if (!sb_has_quota_active(sb, cnt))
|
|
- continue;
|
|
|
|
- mapping = dqopt->files[cnt]->i_mapping;
|
|
+ if (!sb_has_quota_active(sb, type))
|
|
+ return 0;
|
|
|
|
- ret = filemap_fdatawrite(mapping);
|
|
- if (ret)
|
|
- goto out;
|
|
+ inode_lock(dqopt->files[cnt]);
|
|
|
|
- /* if we are using journalled quota */
|
|
- if (is_journalled_quota(sbi))
|
|
- continue;
|
|
+ /*
|
|
+ * do_quotactl
|
|
+ * f2fs_quota_sync
|
|
+ * down_read(quota_sem)
|
|
+ * dquot_writeback_dquots()
|
|
+ * f2fs_dquot_commit
|
|
+ * block_operation
|
|
+ * down_read(quota_sem)
|
|
+ */
|
|
+ f2fs_lock_op(sbi);
|
|
+ down_read(&sbi->quota_sem);
|
|
|
|
- ret = filemap_fdatawait(mapping);
|
|
- if (ret)
|
|
- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
|
|
+ ret = f2fs_quota_sync_file(sbi, cnt);
|
|
+
|
|
+ up_read(&sbi->quota_sem);
|
|
+ f2fs_unlock_op(sbi);
|
|
|
|
- inode_lock(dqopt->files[cnt]);
|
|
- truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
|
|
inode_unlock(dqopt->files[cnt]);
|
|
+
|
|
+ if (ret)
|
|
+ break;
|
|
}
|
|
-out:
|
|
- if (ret)
|
|
- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
|
|
- up_read(&sbi->quota_sem);
|
|
- f2fs_unlock_op(sbi);
|
|
return ret;
|
|
}
|
|
|
|
@@ -2898,11 +2910,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
|
|
return -EFSCORRUPTED;
|
|
}
|
|
|
|
- if (le32_to_cpu(raw_super->cp_payload) >
|
|
- (blocks_per_seg - F2FS_CP_PACKS)) {
|
|
- f2fs_info(sbi, "Insane cp_payload (%u > %u)",
|
|
+ if (le32_to_cpu(raw_super->cp_payload) >=
|
|
+ (blocks_per_seg - F2FS_CP_PACKS -
|
|
+ NR_CURSEG_PERSIST_TYPE)) {
|
|
+ f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
|
|
le32_to_cpu(raw_super->cp_payload),
|
|
- blocks_per_seg - F2FS_CP_PACKS);
|
|
+ blocks_per_seg - F2FS_CP_PACKS -
|
|
+ NR_CURSEG_PERSIST_TYPE);
|
|
return -EFSCORRUPTED;
|
|
}
|
|
|
|
@@ -2938,6 +2952,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
|
|
unsigned int cp_pack_start_sum, cp_payload;
|
|
block_t user_block_count, valid_user_blocks;
|
|
block_t avail_node_count, valid_node_count;
|
|
+ unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
|
|
int i, j;
|
|
|
|
total = le32_to_cpu(raw_super->segment_count);
|
|
@@ -3058,6 +3073,17 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
|
|
return 1;
|
|
}
|
|
|
|
+ nat_blocks = nat_segs << log_blocks_per_seg;
|
|
+ nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
|
|
+ nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
|
|
+ if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
|
|
+ (cp_payload + F2FS_CP_PACKS +
|
|
+ NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
|
|
+ f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
|
|
+ cp_payload, nat_bits_blocks);
|
|
+ return -EFSCORRUPTED;
|
|
+ }
|
|
+
|
|
if (unlikely(f2fs_cp_error(sbi))) {
|
|
f2fs_err(sbi, "A bug case: need to run fsck");
|
|
return 1;
|
|
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
|
|
index 751bc5b1cddf9..6104f627cc712 100644
|
|
--- a/fs/fscache/cookie.c
|
|
+++ b/fs/fscache/cookie.c
|
|
@@ -74,10 +74,8 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
|
|
static int fscache_set_key(struct fscache_cookie *cookie,
|
|
const void *index_key, size_t index_key_len)
|
|
{
|
|
- unsigned long long h;
|
|
u32 *buf;
|
|
int bufs;
|
|
- int i;
|
|
|
|
bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
|
|
|
|
@@ -91,17 +89,7 @@ static int fscache_set_key(struct fscache_cookie *cookie,
|
|
}
|
|
|
|
memcpy(buf, index_key, index_key_len);
|
|
-
|
|
- /* Calculate a hash and combine this with the length in the first word
|
|
- * or first half word
|
|
- */
|
|
- h = (unsigned long)cookie->parent;
|
|
- h += index_key_len + cookie->type;
|
|
-
|
|
- for (i = 0; i < bufs; i++)
|
|
- h += buf[i];
|
|
-
|
|
- cookie->key_hash = h ^ (h >> 32);
|
|
+ cookie->key_hash = fscache_hash(0, buf, bufs);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
|
|
index 08e91efbce538..64aa552b296d7 100644
|
|
--- a/fs/fscache/internal.h
|
|
+++ b/fs/fscache/internal.h
|
|
@@ -97,6 +97,8 @@ extern struct workqueue_struct *fscache_object_wq;
|
|
extern struct workqueue_struct *fscache_op_wq;
|
|
DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
|
|
|
|
+extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
|
|
+
|
|
static inline bool fscache_object_congested(void)
|
|
{
|
|
return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
|
|
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
|
|
index c1e6cc9091aac..4207f98e405fd 100644
|
|
--- a/fs/fscache/main.c
|
|
+++ b/fs/fscache/main.c
|
|
@@ -93,6 +93,45 @@ static struct ctl_table fscache_sysctls_root[] = {
|
|
};
|
|
#endif
|
|
|
|
+/*
|
|
+ * Mixing scores (in bits) for (7,20):
|
|
+ * Input delta: 1-bit 2-bit
|
|
+ * 1 round: 330.3 9201.6
|
|
+ * 2 rounds: 1246.4 25475.4
|
|
+ * 3 rounds: 1907.1 31295.1
|
|
+ * 4 rounds: 2042.3 31718.6
|
|
+ * Perfect: 2048 31744
|
|
+ * (32*64) (32*31/2 * 64)
|
|
+ */
|
|
+#define HASH_MIX(x, y, a) \
|
|
+ ( x ^= (a), \
|
|
+ y ^= x, x = rol32(x, 7),\
|
|
+ x += y, y = rol32(y,20),\
|
|
+ y *= 9 )
|
|
+
|
|
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
|
|
+{
|
|
+ /* Use arch-optimized multiply if one exists */
|
|
+ return __hash_32(y ^ __hash_32(x));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Generate a hash. This is derived from full_name_hash(), but we want to be
|
|
+ * sure it is arch independent and that it doesn't change as bits of the
|
|
+ * computed hash value might appear on disk. The caller also guarantees that
|
|
+ * the hashed data will be a series of aligned 32-bit words.
|
|
+ */
|
|
+unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
|
|
+{
|
|
+ unsigned int a, x = 0, y = salt;
|
|
+
|
|
+ for (; n; n--) {
|
|
+ a = *data++;
|
|
+ HASH_MIX(x, y, a);
|
|
+ }
|
|
+ return fold_hash(x, y);
|
|
+}
|
|
+
|
|
/*
|
|
* initialise the fs caching module
|
|
*/
|
|
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
|
|
index 3faa421568b0a..bf539eab92c6f 100644
|
|
--- a/fs/gfs2/glops.c
|
|
+++ b/fs/gfs2/glops.c
|
|
@@ -623,16 +623,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
|
|
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
|
|
|
|
error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
|
|
- if (error)
|
|
- gfs2_consist(sdp);
|
|
- if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
|
|
- gfs2_consist(sdp);
|
|
-
|
|
- /* Initialize some head of the log stuff */
|
|
- if (!gfs2_withdrawn(sdp)) {
|
|
- sdp->sd_log_sequence = head.lh_sequence + 1;
|
|
- gfs2_log_pointers_init(sdp, head.lh_blkno);
|
|
- }
|
|
+ if (gfs2_assert_withdraw_delayed(sdp, !error))
|
|
+ return error;
|
|
+ if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
|
|
+ GFS2_LOG_HEAD_UNMOUNT))
|
|
+ return -EIO;
|
|
+ sdp->sd_log_sequence = head.lh_sequence + 1;
|
|
+ gfs2_log_pointers_init(sdp, head.lh_blkno);
|
|
}
|
|
return 0;
|
|
}
|
|
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
|
|
index 153272f82984b..5564aa8b45929 100644
|
|
--- a/fs/gfs2/lock_dlm.c
|
|
+++ b/fs/gfs2/lock_dlm.c
|
|
@@ -296,6 +296,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
|
|
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
|
|
gfs2_update_request_times(gl);
|
|
|
|
+ /* don't want to call dlm if we've unmounted the lock protocol */
|
|
+ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
|
|
+ gfs2_glock_free(gl);
|
|
+ return;
|
|
+ }
|
|
/* don't want to skip dlm_unlock writing the lvb when lock has one */
|
|
|
|
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
|
|
diff --git a/fs/io-wq.c b/fs/io-wq.c
|
|
index 8bb17b6d4de3c..3d5fc76b92d01 100644
|
|
--- a/fs/io-wq.c
|
|
+++ b/fs/io-wq.c
|
|
@@ -895,7 +895,7 @@ append:
|
|
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
|
|
{
|
|
struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
|
|
- int work_flags;
|
|
+ bool do_wake;
|
|
unsigned long flags;
|
|
|
|
/*
|
|
@@ -909,14 +909,14 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
|
|
return;
|
|
}
|
|
|
|
- work_flags = work->flags;
|
|
raw_spin_lock_irqsave(&wqe->lock, flags);
|
|
io_wqe_insert_work(wqe, work);
|
|
wqe->flags &= ~IO_WQE_FLAG_STALLED;
|
|
+ do_wake = (work->flags & IO_WQ_WORK_CONCURRENT) ||
|
|
+ !atomic_read(&acct->nr_running);
|
|
raw_spin_unlock_irqrestore(&wqe->lock, flags);
|
|
|
|
- if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
|
|
- !atomic_read(&acct->nr_running))
|
|
+ if (do_wake)
|
|
io_wqe_wake_worker(wqe, acct);
|
|
}
|
|
|
|
diff --git a/fs/io_uring.c b/fs/io_uring.c
|
|
index 2009d1cda606c..d0089039fee79 100644
|
|
--- a/fs/io_uring.c
|
|
+++ b/fs/io_uring.c
|
|
@@ -1498,6 +1498,8 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
|
|
|
|
ret = hrtimer_try_to_cancel(&io->timer);
|
|
if (ret != -1) {
|
|
+ if (status)
|
|
+ req_set_fail_links(req);
|
|
atomic_set(&req->ctx->cq_timeouts,
|
|
atomic_read(&req->ctx->cq_timeouts) + 1);
|
|
list_del_init(&req->timeout.list);
|
|
@@ -3126,7 +3128,7 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
|
|
|
|
ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
|
|
*iovec = NULL;
|
|
- return ret < 0 ? ret : sqe_len;
|
|
+ return ret;
|
|
}
|
|
|
|
if (req->flags & REQ_F_BUFFER_SELECT) {
|
|
@@ -3152,7 +3154,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
|
|
if (!iorw)
|
|
return __io_import_iovec(rw, req, iovec, iter, needs_lock);
|
|
*iovec = NULL;
|
|
- return iov_iter_count(&iorw->iter);
|
|
+ return 0;
|
|
}
|
|
|
|
static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
|
|
@@ -3411,7 +3413,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
|
|
struct iov_iter __iter, *iter = &__iter;
|
|
struct io_async_rw *rw = req->async_data;
|
|
ssize_t io_size, ret, ret2;
|
|
- size_t iov_count;
|
|
bool no_async;
|
|
|
|
if (rw)
|
|
@@ -3420,8 +3421,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
|
|
ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
|
|
if (ret < 0)
|
|
return ret;
|
|
- iov_count = iov_iter_count(iter);
|
|
- io_size = ret;
|
|
+ io_size = iov_iter_count(iter);
|
|
req->result = io_size;
|
|
ret = 0;
|
|
|
|
@@ -3437,7 +3437,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
|
|
if (no_async)
|
|
goto copy_iov;
|
|
|
|
- ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
|
|
+ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
|
|
if (unlikely(ret))
|
|
goto out_free;
|
|
|
|
@@ -3456,7 +3456,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
|
|
if (req->file->f_flags & O_NONBLOCK)
|
|
goto done;
|
|
/* some cases will consume bytes even on error returns */
|
|
- iov_iter_revert(iter, iov_count - iov_iter_count(iter));
|
|
+ iov_iter_revert(iter, io_size - iov_iter_count(iter));
|
|
ret = 0;
|
|
goto copy_iov;
|
|
} else if (ret < 0) {
|
|
@@ -3540,7 +3540,6 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
|
|
struct kiocb *kiocb = &req->rw.kiocb;
|
|
struct iov_iter __iter, *iter = &__iter;
|
|
struct io_async_rw *rw = req->async_data;
|
|
- size_t iov_count;
|
|
ssize_t ret, ret2, io_size;
|
|
|
|
if (rw)
|
|
@@ -3549,8 +3548,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
|
|
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
|
|
if (ret < 0)
|
|
return ret;
|
|
- iov_count = iov_iter_count(iter);
|
|
- io_size = ret;
|
|
+ io_size = iov_iter_count(iter);
|
|
req->result = io_size;
|
|
|
|
/* Ensure we clear previously set non-block flag */
|
|
@@ -3568,7 +3566,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
|
|
(req->flags & REQ_F_ISREG))
|
|
goto copy_iov;
|
|
|
|
- ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
|
|
+ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
|
|
if (unlikely(ret))
|
|
goto out_free;
|
|
|
|
@@ -3611,7 +3609,7 @@ done:
|
|
} else {
|
|
copy_iov:
|
|
/* some cases will consume bytes even on error returns */
|
|
- iov_iter_revert(iter, iov_count - iov_iter_count(iter));
|
|
+ iov_iter_revert(iter, io_size - iov_iter_count(iter));
|
|
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
|
|
if (!ret)
|
|
return -EAGAIN;
|
|
@@ -3746,7 +3744,8 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
|
|
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
|
|
return -EINVAL;
|
|
- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
|
|
+ if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
|
|
+ sqe->splice_fd_in))
|
|
return -EINVAL;
|
|
|
|
req->sync.flags = READ_ONCE(sqe->fsync_flags);
|
|
@@ -3779,7 +3778,8 @@ static int io_fsync(struct io_kiocb *req, bool force_nonblock)
|
|
static int io_fallocate_prep(struct io_kiocb *req,
|
|
const struct io_uring_sqe *sqe)
|
|
{
|
|
- if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
|
|
+ if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
|
|
+ sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
|
|
return -EINVAL;
|
|
@@ -3810,7 +3810,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
|
|
const char __user *fname;
|
|
int ret;
|
|
|
|
- if (unlikely(sqe->ioprio || sqe->buf_index))
|
|
+ if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in))
|
|
return -EINVAL;
|
|
if (unlikely(req->flags & REQ_F_FIXED_FILE))
|
|
return -EBADF;
|
|
@@ -3926,7 +3926,8 @@ static int io_remove_buffers_prep(struct io_kiocb *req,
|
|
struct io_provide_buf *p = &req->pbuf;
|
|
u64 tmp;
|
|
|
|
- if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
|
|
+ if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
|
|
+ sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
|
|
tmp = READ_ONCE(sqe->fd);
|
|
@@ -4002,7 +4003,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
|
|
struct io_provide_buf *p = &req->pbuf;
|
|
u64 tmp;
|
|
|
|
- if (sqe->ioprio || sqe->rw_flags)
|
|
+ if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
|
|
tmp = READ_ONCE(sqe->fd);
|
|
@@ -4095,7 +4096,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
|
|
const struct io_uring_sqe *sqe)
|
|
{
|
|
#if defined(CONFIG_EPOLL)
|
|
- if (sqe->ioprio || sqe->buf_index)
|
|
+ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
|
|
return -EINVAL;
|
|
@@ -4141,7 +4142,7 @@ static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
|
|
static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
{
|
|
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
|
|
- if (sqe->ioprio || sqe->buf_index || sqe->off)
|
|
+ if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
|
|
return -EINVAL;
|
|
@@ -4176,7 +4177,7 @@ static int io_madvise(struct io_kiocb *req, bool force_nonblock)
|
|
|
|
static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
{
|
|
- if (sqe->ioprio || sqe->buf_index || sqe->addr)
|
|
+ if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
|
|
return -EINVAL;
|
|
@@ -4214,7 +4215,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
{
|
|
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
|
|
return -EINVAL;
|
|
- if (sqe->ioprio || sqe->buf_index)
|
|
+ if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
if (req->flags & REQ_F_FIXED_FILE)
|
|
return -EBADF;
|
|
@@ -4261,7 +4262,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
|
|
return -EINVAL;
|
|
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
|
|
- sqe->rw_flags || sqe->buf_index)
|
|
+ sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
if (req->flags & REQ_F_FIXED_FILE)
|
|
return -EBADF;
|
|
@@ -4317,7 +4318,8 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
|
|
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
|
|
return -EINVAL;
|
|
- if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
|
|
+ if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
|
|
+ sqe->splice_fd_in))
|
|
return -EINVAL;
|
|
|
|
req->sync.off = READ_ONCE(sqe->off);
|
|
@@ -4760,7 +4762,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
|
|
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
|
|
return -EINVAL;
|
|
- if (sqe->ioprio || sqe->len || sqe->buf_index)
|
|
+ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in)
|
|
return -EINVAL;
|
|
|
|
accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
|
|
@@ -4801,7 +4803,8 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
|
|
|
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
|
|
return -EINVAL;
- if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
+ sqe->splice_fd_in)
return -EINVAL;
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -5553,7 +5556,8 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags)
+ if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags |
+ sqe->splice_fd_in)
return -EINVAL;
req->timeout_rem.addr = READ_ONCE(sqe->addr);
@@ -5590,7 +5594,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
+ if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
+ sqe->splice_fd_in)
return -EINVAL;
if (off && is_timeout_link)
return -EINVAL;
@@ -5734,7 +5739,8 @@ static int io_async_cancel_prep(struct io_kiocb *req,
return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
+ if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
+ sqe->splice_fd_in)
return -EINVAL;
req->cancel.addr = READ_ONCE(sqe->addr);
@@ -7383,7 +7389,7 @@ static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
this_files = min(nr_files, IORING_MAX_FILES_TABLE);
table->files = kcalloc(this_files, sizeof(struct file *),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!table->files)
break;
nr_files -= this_files;
@@ -7579,8 +7585,10 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
if (nr_args > IORING_MAX_FIXED_FILES)
return -EMFILE;
+ if (nr_args > rlimit(RLIMIT_NOFILE))
+ return -EMFILE;
- file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
+ file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL_ACCOUNT);
if (!file_data)
return -ENOMEM;
file_data->ctx = ctx;
@@ -7590,7 +7598,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!file_data->table)
goto out_free;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
|
|
index 10cc7979ce380..caed9d98c64aa 100644
|
|
--- a/fs/iomap/buffered-io.c
|
|
+++ b/fs/iomap/buffered-io.c
|
|
@@ -1045,7 +1045,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
|
|
|
|
if (error) {
|
|
SetPageError(page);
|
|
- mapping_set_error(inode->i_mapping, -EIO);
|
|
+ mapping_set_error(inode->i_mapping, error);
|
|
}
|
|
|
|
WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
|
|
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
|
|
index 498cb70c2c0d0..273a81971ed57 100644
|
|
--- a/fs/lockd/svclock.c
|
|
+++ b/fs/lockd/svclock.c
|
|
@@ -395,28 +395,10 @@ nlmsvc_release_lockowner(struct nlm_lock *lock)
|
|
nlmsvc_put_lockowner(lock->fl.fl_owner);
|
|
}
|
|
|
|
-static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
|
|
-{
|
|
- struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
|
|
- new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
|
|
-}
|
|
-
|
|
-static void nlmsvc_locks_release_private(struct file_lock *fl)
|
|
-{
|
|
- nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
|
|
-}
|
|
-
|
|
-static const struct file_lock_operations nlmsvc_lock_ops = {
|
|
- .fl_copy_lock = nlmsvc_locks_copy_lock,
|
|
- .fl_release_private = nlmsvc_locks_release_private,
|
|
-};
|
|
-
|
|
void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
|
|
pid_t pid)
|
|
{
|
|
fl->fl_owner = nlmsvc_find_lockowner(host, pid);
|
|
- if (fl->fl_owner != NULL)
|
|
- fl->fl_ops = &nlmsvc_lock_ops;
|
|
}
|
|
|
|
/*
|
|
@@ -788,9 +770,21 @@ nlmsvc_notify_blocked(struct file_lock *fl)
|
|
printk(KERN_WARNING "lockd: notification for unknown block!\n");
|
|
}
|
|
|
|
+static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
|
|
+{
|
|
+ return nlmsvc_get_lockowner(owner);
|
|
+}
|
|
+
|
|
+static void nlmsvc_put_owner(fl_owner_t owner)
|
|
+{
|
|
+ nlmsvc_put_lockowner(owner);
|
|
+}
|
|
+
|
|
const struct lock_manager_operations nlmsvc_lock_operations = {
|
|
.lm_notify = nlmsvc_notify_blocked,
|
|
.lm_grant = nlmsvc_grant_deferred,
|
|
+ .lm_get_owner = nlmsvc_get_owner,
|
|
+ .lm_put_owner = nlmsvc_put_owner,
|
|
};
|
|
|
|
/*
|
|
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
|
|
index 371665e0c154c..5370e082aded5 100644
|
|
--- a/fs/nfs/pnfs.c
|
|
+++ b/fs/nfs/pnfs.c
|
|
@@ -335,7 +335,7 @@ static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
|
|
|
|
static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
|
|
{
|
|
- if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
|
|
+ if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
|
|
lo->plh_barrier = newseq;
|
|
}
|
|
|
|
@@ -347,11 +347,15 @@ pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
|
|
iomode = IOMODE_ANY;
|
|
lo->plh_return_iomode = iomode;
|
|
set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
|
|
- if (seq != 0) {
|
|
- WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
|
|
+ /*
|
|
+ * We must set lo->plh_return_seq to avoid livelocks with
|
|
+ * pnfs_layout_need_return()
|
|
+ */
|
|
+ if (seq == 0)
|
|
+ seq = be32_to_cpu(lo->plh_stateid.seqid);
|
|
+ if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
|
|
lo->plh_return_seq = seq;
|
|
- pnfs_barrier_update(lo, seq);
|
|
- }
|
|
+ pnfs_barrier_update(lo, seq);
|
|
}
|
|
|
|
static void
|
|
@@ -1000,7 +1004,7 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
|
|
{
|
|
u32 seqid = be32_to_cpu(stateid->seqid);
|
|
|
|
- return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
|
|
+ return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
|
|
}
|
|
|
|
/* lget is set to 1 if called from inside send_layoutget call chain */
|
|
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
|
|
index 142aac9b63a89..0313390fa4b44 100644
|
|
--- a/fs/nfsd/nfs4state.c
|
|
+++ b/fs/nfsd/nfs4state.c
|
|
@@ -6855,8 +6855,7 @@ out:
|
|
/*
|
|
* The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
|
|
* so we do a temporary open here just to get an open file to pass to
|
|
- * vfs_test_lock. (Arguably perhaps test_lock should be done with an
|
|
- * inode operation.)
|
|
+ * vfs_test_lock.
|
|
*/
|
|
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
|
|
{
|
|
@@ -6871,7 +6870,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
|
|
NFSD_MAY_READ));
|
|
if (err)
|
|
goto out;
|
|
+ lock->fl_file = nf->nf_file;
|
|
err = nfserrno(vfs_test_lock(nf->nf_file, lock));
|
|
+ lock->fl_file = NULL;
|
|
out:
|
|
fh_unlock(fhp);
|
|
nfsd_file_put(nf);
|
|
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
|
|
index 1192c99536200..c3af99e94f1d1 100644
|
|
--- a/fs/notify/fanotify/fanotify.c
|
|
+++ b/fs/notify/fanotify/fanotify.c
|
|
@@ -129,11 +129,15 @@ static bool fanotify_should_merge(struct fsnotify_event *old_fsn,
|
|
return false;
|
|
}
|
|
|
|
+/* Limit event merges to limit CPU overhead per event */
|
|
+#define FANOTIFY_MAX_MERGE_EVENTS 128
|
|
+
|
|
/* and the list better be locked by something too! */
|
|
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
|
|
{
|
|
struct fsnotify_event *test_event;
|
|
struct fanotify_event *new;
|
|
+ int i = 0;
|
|
|
|
pr_debug("%s: list=%p event=%p\n", __func__, list, event);
|
|
new = FANOTIFY_E(event);
|
|
@@ -147,6 +151,8 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
|
|
return 0;
|
|
|
|
list_for_each_entry_reverse(test_event, list, list) {
|
|
+ if (++i > FANOTIFY_MAX_MERGE_EVENTS)
|
|
+ break;
|
|
if (fanotify_should_merge(test_event, event)) {
|
|
FANOTIFY_E(test_event)->mask |= new->mask;
|
|
return 1;
|
|
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
|
|
index d1efa3a5a5032..08b595c526d74 100644
|
|
--- a/fs/overlayfs/dir.c
|
|
+++ b/fs/overlayfs/dir.c
|
|
@@ -542,8 +542,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
|
|
goto out_cleanup;
|
|
}
|
|
err = ovl_instantiate(dentry, inode, newdentry, hardlink);
|
|
- if (err)
|
|
- goto out_cleanup;
|
|
+ if (err) {
|
|
+ ovl_cleanup(udir, newdentry);
|
|
+ dput(newdentry);
|
|
+ }
|
|
out_dput:
|
|
dput(upper);
|
|
out_unlock:
|
|
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
|
|
index 3d181b1a6d567..17397c7532f12 100644
|
|
--- a/fs/userfaultfd.c
|
|
+++ b/fs/userfaultfd.c
|
|
@@ -32,11 +32,6 @@ int sysctl_unprivileged_userfaultfd __read_mostly = 1;
|
|
|
|
static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
|
|
|
|
-enum userfaultfd_state {
|
|
- UFFD_STATE_WAIT_API,
|
|
- UFFD_STATE_RUNNING,
|
|
-};
|
|
-
|
|
/*
|
|
* Start with fault_pending_wqh and fault_wqh so they're more likely
|
|
* to be in the same cacheline.
|
|
@@ -68,8 +63,6 @@ struct userfaultfd_ctx {
|
|
unsigned int flags;
|
|
/* features requested from the userspace */
|
|
unsigned int features;
|
|
- /* state machine */
|
|
- enum userfaultfd_state state;
|
|
/* released */
|
|
bool released;
|
|
/* memory mappings are changing because of non-cooperative event */
|
|
@@ -103,6 +96,14 @@ struct userfaultfd_wake_range {
|
|
unsigned long len;
|
|
};
|
|
|
|
+/* internal indication that UFFD_API ioctl was successfully executed */
|
|
+#define UFFD_FEATURE_INITIALIZED (1u << 31)
|
|
+
|
|
+static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
|
|
+{
|
|
+ return ctx->features & UFFD_FEATURE_INITIALIZED;
|
|
+}
|
|
+
|
|
static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
|
|
int wake_flags, void *key)
|
|
{
|
|
@@ -659,7 +660,6 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
|
|
|
|
refcount_set(&ctx->refcount, 1);
|
|
ctx->flags = octx->flags;
|
|
- ctx->state = UFFD_STATE_RUNNING;
|
|
ctx->features = octx->features;
|
|
ctx->released = false;
|
|
ctx->mmap_changing = false;
|
|
@@ -936,38 +936,33 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
|
|
|
|
poll_wait(file, &ctx->fd_wqh, wait);
|
|
|
|
- switch (ctx->state) {
|
|
- case UFFD_STATE_WAIT_API:
|
|
+ if (!userfaultfd_is_initialized(ctx))
|
|
return EPOLLERR;
|
|
- case UFFD_STATE_RUNNING:
|
|
- /*
|
|
- * poll() never guarantees that read won't block.
|
|
- * userfaults can be waken before they're read().
|
|
- */
|
|
- if (unlikely(!(file->f_flags & O_NONBLOCK)))
|
|
- return EPOLLERR;
|
|
- /*
|
|
- * lockless access to see if there are pending faults
|
|
- * __pollwait last action is the add_wait_queue but
|
|
- * the spin_unlock would allow the waitqueue_active to
|
|
- * pass above the actual list_add inside
|
|
- * add_wait_queue critical section. So use a full
|
|
- * memory barrier to serialize the list_add write of
|
|
- * add_wait_queue() with the waitqueue_active read
|
|
- * below.
|
|
- */
|
|
- ret = 0;
|
|
- smp_mb();
|
|
- if (waitqueue_active(&ctx->fault_pending_wqh))
|
|
- ret = EPOLLIN;
|
|
- else if (waitqueue_active(&ctx->event_wqh))
|
|
- ret = EPOLLIN;
|
|
|
|
- return ret;
|
|
- default:
|
|
- WARN_ON_ONCE(1);
|
|
+ /*
|
|
+ * poll() never guarantees that read won't block.
|
|
+ * userfaults can be waken before they're read().
|
|
+ */
|
|
+ if (unlikely(!(file->f_flags & O_NONBLOCK)))
|
|
return EPOLLERR;
|
|
- }
|
|
+ /*
|
|
+ * lockless access to see if there are pending faults
|
|
+ * __pollwait last action is the add_wait_queue but
|
|
+ * the spin_unlock would allow the waitqueue_active to
|
|
+ * pass above the actual list_add inside
|
|
+ * add_wait_queue critical section. So use a full
|
|
+ * memory barrier to serialize the list_add write of
|
|
+ * add_wait_queue() with the waitqueue_active read
|
|
+ * below.
|
|
+ */
|
|
+ ret = 0;
|
|
+ smp_mb();
|
|
+ if (waitqueue_active(&ctx->fault_pending_wqh))
|
|
+ ret = EPOLLIN;
|
|
+ else if (waitqueue_active(&ctx->event_wqh))
|
|
+ ret = EPOLLIN;
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static const struct file_operations userfaultfd_fops;
|
|
@@ -1161,7 +1156,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
|
|
struct uffd_msg msg;
|
|
int no_wait = file->f_flags & O_NONBLOCK;
|
|
|
|
- if (ctx->state == UFFD_STATE_WAIT_API)
|
|
+ if (!userfaultfd_is_initialized(ctx))
|
|
return -EINVAL;
|
|
|
|
for (;;) {
|
|
@@ -1816,9 +1811,10 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
|
|
static inline unsigned int uffd_ctx_features(__u64 user_features)
|
|
{
|
|
/*
|
|
- * For the current set of features the bits just coincide
|
|
+ * For the current set of features the bits just coincide. Set
|
|
+ * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
|
|
*/
|
|
- return (unsigned int)user_features;
|
|
+ return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
|
|
}
|
|
|
|
/*
|
|
@@ -1831,12 +1827,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
|
|
{
|
|
struct uffdio_api uffdio_api;
|
|
void __user *buf = (void __user *)arg;
|
|
+ unsigned int ctx_features;
|
|
int ret;
|
|
__u64 features;
|
|
|
|
- ret = -EINVAL;
|
|
- if (ctx->state != UFFD_STATE_WAIT_API)
|
|
- goto out;
|
|
ret = -EFAULT;
|
|
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
|
|
goto out;
|
|
@@ -1853,9 +1847,13 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
|
|
ret = -EFAULT;
|
|
if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
|
|
goto out;
|
|
- ctx->state = UFFD_STATE_RUNNING;
|
|
+
|
|
/* only enable the requested features for this uffd context */
|
|
- ctx->features = uffd_ctx_features(features);
|
|
+ ctx_features = uffd_ctx_features(features);
|
|
+ ret = -EINVAL;
|
|
+ if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
|
|
+ goto err_out;
|
|
+
|
|
ret = 0;
|
|
out:
|
|
return ret;
|
|
@@ -1872,7 +1870,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
|
|
int ret = -EINVAL;
|
|
struct userfaultfd_ctx *ctx = file->private_data;
|
|
|
|
- if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
|
|
+ if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
|
|
return -EINVAL;
|
|
|
|
switch(cmd) {
|
|
@@ -1976,7 +1974,6 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
|
|
refcount_set(&ctx->refcount, 1);
|
|
ctx->flags = flags;
|
|
ctx->features = 0;
|
|
- ctx->state = UFFD_STATE_WAIT_API;
|
|
ctx->released = false;
|
|
ctx->mmap_changing = false;
|
|
ctx->mm = current->mm;
|
|
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
|
|
index 948c5203ca9c6..f5bd80858fc51 100644
|
|
--- a/include/crypto/public_key.h
|
|
+++ b/include/crypto/public_key.h
|
|
@@ -39,9 +39,9 @@ extern void public_key_free(struct public_key *key);
|
|
struct public_key_signature {
|
|
struct asymmetric_key_id *auth_ids[2];
|
|
u8 *s; /* Signature */
|
|
- u32 s_size; /* Number of bytes in signature */
|
|
u8 *digest;
|
|
- u8 digest_size; /* Number of bytes in digest */
|
|
+ u32 s_size; /* Number of bytes in signature */
|
|
+ u32 digest_size; /* Number of bytes in digest */
|
|
const char *pkey_algo;
|
|
const char *hash_algo;
|
|
const char *encoding;
|
|
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
|
|
index 6bf8b2b789919..f99d3417f3042 100644
|
|
--- a/include/drm/drm_auth.h
|
|
+++ b/include/drm/drm_auth.h
|
|
@@ -107,6 +107,7 @@ struct drm_master {
|
|
};
|
|
|
|
struct drm_master *drm_master_get(struct drm_master *master);
|
|
+struct drm_master *drm_file_get_master(struct drm_file *file_priv);
|
|
void drm_master_put(struct drm_master **master);
|
|
bool drm_is_current_master(struct drm_file *fpriv);
|
|
|
|
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
|
|
index 716990bace104..42d04607d091f 100644
|
|
--- a/include/drm/drm_file.h
|
|
+++ b/include/drm/drm_file.h
|
|
@@ -226,15 +226,27 @@ struct drm_file {
|
|
/**
|
|
* @master:
|
|
*
|
|
- * Master this node is currently associated with. Only relevant if
|
|
- * drm_is_primary_client() returns true. Note that this only
|
|
- * matches &drm_device.master if the master is the currently active one.
|
|
+ * Master this node is currently associated with. Protected by struct
|
|
+ * &drm_device.master_mutex, and serialized by @master_lookup_lock.
|
|
+ *
|
|
+ * Only relevant if drm_is_primary_client() returns true. Note that
|
|
+ * this only matches &drm_device.master if the master is the currently
|
|
+ * active one.
|
|
+ *
|
|
+ * When dereferencing this pointer, either hold struct
|
|
+ * &drm_device.master_mutex for the duration of the pointer's use, or
|
|
+ * use drm_file_get_master() if struct &drm_device.master_mutex is not
|
|
+ * currently held and there is no other need to hold it. This prevents
|
|
+ * @master from being freed during use.
|
|
*
|
|
* See also @authentication and @is_master and the :ref:`section on
|
|
* primary nodes and authentication <drm_primary_node>`.
|
|
*/
|
|
struct drm_master *master;
|
|
|
|
+ /** @master_lock: Serializes @master. */
|
|
+ spinlock_t master_lookup_lock;
|
|
+
|
|
/** @pid: Process that opened this file. */
|
|
struct pid *pid;
|
|
|
|
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
|
|
index 6408b446051f8..b98291d391f34 100644
|
|
--- a/include/linux/ethtool.h
|
|
+++ b/include/linux/ethtool.h
|
|
@@ -17,8 +17,6 @@
|
|
#include <linux/compat.h>
|
|
#include <uapi/linux/ethtool.h>
|
|
|
|
-#ifdef CONFIG_COMPAT
|
|
-
|
|
struct compat_ethtool_rx_flow_spec {
|
|
u32 flow_type;
|
|
union ethtool_flow_union h_u;
|
|
@@ -38,8 +36,6 @@ struct compat_ethtool_rxnfc {
|
|
u32 rule_locs[];
|
|
};
|
|
|
|
-#endif /* CONFIG_COMPAT */
|
|
-
|
|
#include <linux/rculist.h>
|
|
|
|
/**
|
|
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
|
|
index 5b68c9787f7c2..b9fbb6d4150e2 100644
|
|
--- a/include/linux/hugetlb.h
|
|
+++ b/include/linux/hugetlb.h
|
|
@@ -722,6 +722,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
|
|
|
|
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
|
|
|
|
+static inline void hugetlb_count_init(struct mm_struct *mm)
|
|
+{
|
|
+ atomic_long_set(&mm->hugetlb_usage, 0);
|
|
+}
|
|
+
|
|
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
|
|
{
|
|
atomic_long_add(l, &mm->hugetlb_usage);
|
|
@@ -897,6 +902,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
|
|
return &mm->page_table_lock;
|
|
}
|
|
|
|
+static inline void hugetlb_count_init(struct mm_struct *mm)
|
|
+{
|
|
+}
|
|
+
|
|
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
|
|
{
|
|
}
|
|
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
|
|
index 0bff345c4bc68..171bf1be40115 100644
|
|
--- a/include/linux/hugetlb_cgroup.h
|
|
+++ b/include/linux/hugetlb_cgroup.h
|
|
@@ -118,6 +118,13 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
|
|
css_put(&h_cg->css);
|
|
}
|
|
|
|
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
|
|
+ struct resv_map *resv_map)
|
|
+{
|
|
+ if (resv_map->css)
|
|
+ css_get(resv_map->css);
|
|
+}
|
|
+
|
|
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
|
|
struct hugetlb_cgroup **ptr);
|
|
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
|
|
@@ -196,6 +203,11 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
|
|
{
|
|
}
|
|
|
|
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
|
|
+ struct resv_map *resv_map)
|
|
+{
|
|
+}
|
|
+
|
|
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
|
|
struct hugetlb_cgroup **ptr)
|
|
{
|
|
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
|
|
index c00ee3458a919..142ec79cda84f 100644
|
|
--- a/include/linux/intel-iommu.h
|
|
+++ b/include/linux/intel-iommu.h
|
|
@@ -122,9 +122,9 @@
|
|
#define DMAR_MTRR_PHYSMASK8_REG 0x208
|
|
#define DMAR_MTRR_PHYSBASE9_REG 0x210
|
|
#define DMAR_MTRR_PHYSMASK9_REG 0x218
|
|
-#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
|
|
-#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
|
|
-#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
|
|
+#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
|
|
+#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
|
|
+#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
|
|
|
|
#define OFFSET_STRIDE (9)
|
|
|
|
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
|
|
index 0d7013da818cb..095b3b39bd032 100644
|
|
--- a/include/linux/rcupdate.h
|
|
+++ b/include/linux/rcupdate.h
|
|
@@ -163,7 +163,7 @@ void synchronize_rcu_tasks(void);
|
|
# define synchronize_rcu_tasks synchronize_rcu
|
|
# endif
|
|
|
|
-# ifdef CONFIG_TASKS_RCU_TRACE
|
|
+# ifdef CONFIG_TASKS_TRACE_RCU
|
|
# define rcu_tasks_trace_qs(t) \
|
|
do { \
|
|
if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
|
|
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
|
|
index cad1fa2b6baa2..e7b997d6f0313 100644
|
|
--- a/include/linux/sunrpc/xprt.h
|
|
+++ b/include/linux/sunrpc/xprt.h
|
|
@@ -421,6 +421,7 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
|
|
#define XPRT_CONGESTED (9)
|
|
#define XPRT_CWND_WAIT (10)
|
|
#define XPRT_WRITE_SPACE (11)
|
|
+#define XPRT_SND_IS_COOKIE (12)
|
|
|
|
static inline void xprt_set_connected(struct rpc_xprt *xprt)
|
|
{
|
|
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
|
|
index 3c1423ee74b4e..8c2a712cb2420 100644
|
|
--- a/include/linux/sunrpc/xprtsock.h
|
|
+++ b/include/linux/sunrpc/xprtsock.h
|
|
@@ -10,6 +10,7 @@
|
|
|
|
int init_socket_xprt(void);
|
|
void cleanup_socket_xprt(void);
|
|
+unsigned short get_srcport(struct rpc_xprt *);
|
|
|
|
#define RPC_MIN_RESVPORT (1U)
|
|
#define RPC_MAX_RESVPORT (65535U)
|
|
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
|
|
index 123b1e9ea304a..010d581598873 100644
|
|
--- a/include/net/flow_offload.h
|
|
+++ b/include/net/flow_offload.h
|
|
@@ -444,6 +444,7 @@ struct flow_block_offload {
|
|
struct list_head *driver_block_list;
|
|
struct netlink_ext_ack *extack;
|
|
struct Qdisc *sch;
|
|
+ struct list_head *cb_list_head;
|
|
};
|
|
|
|
enum tc_setup_type;
|
|
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
|
|
index be07b5470f4bb..f51bc8f368134 100644
|
|
--- a/include/uapi/linux/serial_reg.h
|
|
+++ b/include/uapi/linux/serial_reg.h
|
|
@@ -62,6 +62,7 @@
|
|
* ST16C654: 8 16 56 60 8 16 32 56 PORT_16654
|
|
* TI16C750: 1 16 32 56 xx xx xx xx PORT_16750
|
|
* TI16C752: 8 16 56 60 8 16 32 56
|
|
+ * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950
|
|
* Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA
|
|
*/
|
|
#define UART_FCR_R_TRIG_00 0x00
|
|
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
|
|
index 14de1271463fd..4457545299177 100644
|
|
--- a/kernel/dma/debug.c
|
|
+++ b/kernel/dma/debug.c
|
|
@@ -794,7 +794,7 @@ static int dump_show(struct seq_file *seq, void *v)
|
|
}
|
|
DEFINE_SHOW_ATTRIBUTE(dump);
|
|
|
|
-static void dma_debug_fs_init(void)
|
|
+static int __init dma_debug_fs_init(void)
|
|
{
|
|
struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
|
|
|
|
@@ -807,7 +807,10 @@ static void dma_debug_fs_init(void)
|
|
debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
|
|
debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
|
|
debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
|
|
+
|
|
+ return 0;
|
|
}
|
|
+core_initcall_sync(dma_debug_fs_init);
|
|
|
|
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
|
|
{
|
|
@@ -892,8 +895,6 @@ static int dma_debug_init(void)
|
|
spin_lock_init(&dma_entry_hash[i].lock);
|
|
}
|
|
|
|
- dma_debug_fs_init();
|
|
-
|
|
nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
|
|
for (i = 0; i < nr_pages; ++i)
|
|
dma_debug_create_entries(GFP_KERNEL);
|
|
diff --git a/kernel/fork.c b/kernel/fork.c
|
|
index 9705439439fe3..3f96400a0ac61 100644
|
|
--- a/kernel/fork.c
|
|
+++ b/kernel/fork.c
|
|
@@ -1037,6 +1037,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
|
|
mm->pmd_huge_pte = NULL;
|
|
#endif
|
|
mm_init_uprobes_state(mm);
|
|
+ hugetlb_count_init(mm);
|
|
|
|
if (current->mm) {
|
|
mm->flags = current->mm->flags & MMF_INIT_MASK;
|
|
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
|
|
index 9de21803a8ae2..ef8733e2a476e 100644
|
|
--- a/kernel/pid_namespace.c
|
|
+++ b/kernel/pid_namespace.c
|
|
@@ -51,7 +51,8 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
|
|
mutex_lock(&pid_caches_mutex);
|
|
/* Name collision forces to do allocation under mutex. */
|
|
if (!*pkc)
|
|
- *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0);
|
|
+ *pkc = kmem_cache_create(name, len, 0,
|
|
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
|
|
mutex_unlock(&pid_caches_mutex);
|
|
/* current can fail, but someone else can succeed. */
|
|
return READ_ONCE(*pkc);
|
|
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
|
|
index 574aeaac9272d..c5091aeaa37bb 100644
|
|
--- a/kernel/rcu/tree_plugin.h
|
|
+++ b/kernel/rcu/tree_plugin.h
|
|
@@ -2591,17 +2591,17 @@ static void noinstr rcu_dynticks_task_exit(void)
|
|
/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
|
|
static void rcu_dynticks_task_trace_enter(void)
|
|
{
|
|
-#ifdef CONFIG_TASKS_RCU_TRACE
|
|
+#ifdef CONFIG_TASKS_TRACE_RCU
|
|
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
|
|
current->trc_reader_special.b.need_mb = true;
|
|
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
|
|
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
|
|
}
|
|
|
|
/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
|
|
static void rcu_dynticks_task_trace_exit(void)
|
|
{
|
|
-#ifdef CONFIG_TASKS_RCU_TRACE
|
|
+#ifdef CONFIG_TASKS_TRACE_RCU
|
|
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
|
|
current->trc_reader_special.b.need_mb = false;
|
|
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
|
|
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
|
|
}
|
|
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
|
|
index 51d19fc71e616..4cb622b2661b5 100644
|
|
--- a/kernel/workqueue.c
|
|
+++ b/kernel/workqueue.c
|
|
@@ -5893,6 +5893,13 @@ static void __init wq_numa_init(void)
|
|
return;
|
|
}
|
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
+ if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
|
|
+ pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
|
|
BUG_ON(!wq_update_unbound_numa_attrs_buf);
|
|
|
|
@@ -5910,11 +5917,6 @@ static void __init wq_numa_init(void)
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
node = cpu_to_node(cpu);
|
|
- if (WARN_ON(node == NUMA_NO_NODE)) {
|
|
- pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
|
|
- /* happens iff arch is bonkers, let's just proceed */
|
|
- return;
|
|
- }
|
|
cpumask_set_cpu(cpu, tbl[node]);
|
|
}
|
|
|
|
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
|
|
index ca7d635bccd9d..4a9137c8551ad 100644
|
|
--- a/lib/test_bpf.c
|
|
+++ b/lib/test_bpf.c
|
|
@@ -4286,8 +4286,8 @@ static struct bpf_test tests[] = {
|
|
.u.insns_int = {
|
|
BPF_LD_IMM64(R0, 0),
|
|
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
|
|
- BPF_STX_MEM(BPF_W, R10, R1, -40),
|
|
- BPF_LDX_MEM(BPF_W, R0, R10, -40),
|
|
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
|
|
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
|
|
BPF_EXIT_INSN(),
|
|
},
|
|
INTERNAL,
|
|
@@ -6664,7 +6664,14 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
|
|
u64 duration;
|
|
u32 ret;
|
|
|
|
- if (test->test[i].data_size == 0 &&
|
|
+ /*
|
|
+ * NOTE: Several sub-tests may be present, in which case
|
|
+ * a zero {data_size, result} tuple indicates the end of
|
|
+ * the sub-test array. The first test is always run,
|
|
+ * even if both data_size and result happen to be zero.
|
|
+ */
|
|
+ if (i > 0 &&
|
|
+ test->test[i].data_size == 0 &&
|
|
test->test[i].result == 0)
|
|
break;
|
|
|
|
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
|
|
index f93b1e145ada7..16b1d3a3a4975 100644
|
|
--- a/lib/test_stackinit.c
|
|
+++ b/lib/test_stackinit.c
|
|
@@ -67,10 +67,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
|
|
#define INIT_STRUCT_none /**/
|
|
#define INIT_STRUCT_zero = { }
|
|
#define INIT_STRUCT_static_partial = { .two = 0, }
|
|
-#define INIT_STRUCT_static_all = { .one = arg->one, \
|
|
- .two = arg->two, \
|
|
- .three = arg->three, \
|
|
- .four = arg->four, \
|
|
+#define INIT_STRUCT_static_all = { .one = 0, \
|
|
+ .two = 0, \
|
|
+ .three = 0, \
|
|
+ .four = 0, \
|
|
}
|
|
#define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
|
|
#define INIT_STRUCT_dynamic_all = { .one = arg->one, \
|
|
@@ -84,8 +84,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
|
|
var.one = 0; \
|
|
var.two = 0; \
|
|
var.three = 0; \
|
|
- memset(&var.four, 0, \
|
|
- sizeof(var.four))
|
|
+ var.four = 0
|
|
|
|
/*
|
|
* @name: unique string name for the test
|
|
@@ -210,18 +209,13 @@ struct test_small_hole {
|
|
unsigned long four;
|
|
};
|
|
|
|
-/* Try to trigger unhandled padding in a structure. */
|
|
-struct test_aligned {
|
|
- u32 internal1;
|
|
- u64 internal2;
|
|
-} __aligned(64);
|
|
-
|
|
+/* Trigger unhandled padding in a structure. */
|
|
struct test_big_hole {
|
|
u8 one;
|
|
u8 two;
|
|
u8 three;
|
|
/* 61 byte padding hole here. */
|
|
- struct test_aligned four;
|
|
+ u8 four __aligned(64);
|
|
} __aligned(64);
|
|
|
|
struct test_trailing_hole {
|
|
diff --git a/mm/hmm.c b/mm/hmm.c
|
|
index 943cb2ba44423..fb617054f9631 100644
|
|
--- a/mm/hmm.c
|
|
+++ b/mm/hmm.c
|
|
@@ -291,10 +291,13 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
|
|
goto fault;
|
|
|
|
/*
|
|
+ * Bypass devmap pte such as DAX page when all pfn requested
|
|
+ * flags(pfn_req_flags) are fulfilled.
|
|
* Since each architecture defines a struct page for the zero page, just
|
|
* fall through and treat it like a normal page.
|
|
*/
|
|
- if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
|
|
+ if (pte_special(pte) && !pte_devmap(pte) &&
|
|
+ !is_zero_pfn(pte_pfn(pte))) {
|
|
if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
|
|
pte_unmap(ptep);
|
|
return -EFAULT;
|
|
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
|
|
index fa6b0ac6c280d..6e92ab0ae070f 100644
|
|
--- a/mm/hugetlb.c
|
|
+++ b/mm/hugetlb.c
|
|
@@ -3659,8 +3659,10 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
|
|
* after this open call completes. It is therefore safe to take a
|
|
* new reference here without additional locking.
|
|
*/
|
|
- if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
|
|
+ if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
|
|
+ resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
|
|
kref_get(&resv->refs);
|
|
+ }
|
|
}
|
|
|
|
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
|
|
diff --git a/mm/vmscan.c b/mm/vmscan.c
|
|
index 7fb9af001ed5c..f2817e80a1ab3 100644
|
|
--- a/mm/vmscan.c
|
|
+++ b/mm/vmscan.c
|
|
@@ -2378,7 +2378,7 @@ out:
|
|
cgroup_size = max(cgroup_size, protection);
|
|
|
|
scan = lruvec_size - lruvec_size * protection /
|
|
- cgroup_size;
|
|
+ (cgroup_size + 1);
|
|
|
|
/*
|
|
* Minimally target SWAP_CLUSTER_MAX pages to keep
|
|
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
|
|
index f4fea28e05da6..3ec1a51a6944e 100644
|
|
--- a/net/9p/trans_xen.c
|
|
+++ b/net/9p/trans_xen.c
|
|
@@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
|
|
|
|
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
|
|
{
|
|
- struct xen_9pfs_front_priv *priv = NULL;
|
|
+ struct xen_9pfs_front_priv *priv;
|
|
RING_IDX cons, prod, masked_cons, masked_prod;
|
|
unsigned long flags;
|
|
u32 size = p9_req->tc.size;
|
|
@@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
|
|
break;
|
|
}
|
|
read_unlock(&xen_9pfs_lock);
|
|
- if (!priv || priv->client != client)
|
|
+ if (list_entry_is_head(priv, &xen_9pfs_devs, list))
|
|
return -EINVAL;
|
|
|
|
num = p9_req->tc.tag % priv->num_rings;
|
|
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
|
|
index e59ae24a8f17f..9f52145bb7b76 100644
|
|
--- a/net/bluetooth/hci_event.c
|
|
+++ b/net/bluetooth/hci_event.c
|
|
@@ -4329,6 +4329,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
|
|
|
|
switch (ev->status) {
|
|
case 0x00:
|
|
+ /* The synchronous connection complete event should only be
|
|
+ * sent once per new connection. Receiving a successful
|
|
+ * complete event when the connection status is already
|
|
+ * BT_CONNECTED means that the device is misbehaving and sent
|
|
+ * multiple complete event packets for the same new connection.
|
|
+ *
|
|
+ * Registering the device more than once can corrupt kernel
|
|
+ * memory, hence upon detecting this invalid event, we report
|
|
+ * an error and ignore the packet.
|
|
+ */
|
|
+ if (conn->state == BT_CONNECTED) {
|
|
+ bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
|
|
+ goto unlock;
|
|
+ }
|
|
+
|
|
conn->handle = __le16_to_cpu(ev->handle);
|
|
conn->state = BT_CONNECTED;
|
|
conn->type = ev->link_type;
|
|
@@ -5055,9 +5070,64 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
|
|
}
|
|
#endif
|
|
|
|
+static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
|
|
+ u8 bdaddr_type, bdaddr_t *local_rpa)
|
|
+{
|
|
+ if (conn->out) {
|
|
+ conn->dst_type = bdaddr_type;
|
|
+ conn->resp_addr_type = bdaddr_type;
|
|
+ bacpy(&conn->resp_addr, bdaddr);
|
|
+
|
|
+ /* Check if the controller has set a Local RPA then it must be
|
|
+ * used instead or hdev->rpa.
|
|
+ */
|
|
+ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
|
|
+ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
|
|
+ bacpy(&conn->init_addr, local_rpa);
|
|
+ } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
|
|
+ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
|
|
+ bacpy(&conn->init_addr, &conn->hdev->rpa);
|
|
+ } else {
|
|
+ hci_copy_identity_address(conn->hdev, &conn->init_addr,
|
|
+ &conn->init_addr_type);
|
|
+ }
|
|
+ } else {
|
|
+ conn->resp_addr_type = conn->hdev->adv_addr_type;
|
|
+ /* Check if the controller has set a Local RPA then it must be
|
|
+ * used instead or hdev->rpa.
|
|
+ */
|
|
+ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
|
|
+ conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
|
|
+ bacpy(&conn->resp_addr, local_rpa);
|
|
+ } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
|
|
+ /* In case of ext adv, resp_addr will be updated in
|
|
+ * Adv Terminated event.
|
|
+ */
|
|
+ if (!ext_adv_capable(conn->hdev))
|
|
+ bacpy(&conn->resp_addr,
|
|
+ &conn->hdev->random_addr);
|
|
+ } else {
|
|
+ bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
|
|
+ }
|
|
+
|
|
+ conn->init_addr_type = bdaddr_type;
|
|
+ bacpy(&conn->init_addr, bdaddr);
|
|
+
|
|
+ /* For incoming connections, set the default minimum
|
|
+ * and maximum connection interval. They will be used
|
|
+ * to check if the parameters are in range and if not
|
|
+ * trigger the connection update procedure.
|
|
+ */
|
|
+ conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
|
|
+ conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
|
|
+ }
|
|
+}
|
|
+
|
|
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
|
|
- bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
|
|
- u16 interval, u16 latency, u16 supervision_timeout)
|
|
+ bdaddr_t *bdaddr, u8 bdaddr_type,
|
|
+ bdaddr_t *local_rpa, u8 role, u16 handle,
|
|
+ u16 interval, u16 latency,
|
|
+ u16 supervision_timeout)
|
|
{
|
|
struct hci_conn_params *params;
|
|
struct hci_conn *conn;
|
|
@@ -5105,32 +5175,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
|
|
cancel_delayed_work(&conn->le_conn_timeout);
|
|
}
|
|
|
|
- if (!conn->out) {
|
|
- /* Set the responder (our side) address type based on
|
|
- * the advertising address type.
|
|
- */
|
|
- conn->resp_addr_type = hdev->adv_addr_type;
|
|
- if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
|
|
- /* In case of ext adv, resp_addr will be updated in
|
|
- * Adv Terminated event.
|
|
- */
|
|
- if (!ext_adv_capable(hdev))
|
|
- bacpy(&conn->resp_addr, &hdev->random_addr);
|
|
- } else {
|
|
- bacpy(&conn->resp_addr, &hdev->bdaddr);
|
|
- }
|
|
-
|
|
- conn->init_addr_type = bdaddr_type;
|
|
- bacpy(&conn->init_addr, bdaddr);
|
|
-
|
|
- /* For incoming connections, set the default minimum
|
|
- * and maximum connection interval. They will be used
|
|
- * to check if the parameters are in range and if not
|
|
- * trigger the connection update procedure.
|
|
- */
|
|
- conn->le_conn_min_interval = hdev->le_conn_min_interval;
|
|
- conn->le_conn_max_interval = hdev->le_conn_max_interval;
|
|
- }
|
|
+ le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
|
|
|
|
/* Lookup the identity address from the stored connection
|
|
* address and address type.
|
|
@@ -5224,7 +5269,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
|
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
|
|
|
|
le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
|
|
- ev->role, le16_to_cpu(ev->handle),
|
|
+ NULL, ev->role, le16_to_cpu(ev->handle),
|
|
le16_to_cpu(ev->interval),
|
|
le16_to_cpu(ev->latency),
|
|
le16_to_cpu(ev->supervision_timeout));
|
|
@@ -5238,7 +5283,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
|
|
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
|
|
|
|
le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
|
|
- ev->role, le16_to_cpu(ev->handle),
|
|
+ &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
|
|
le16_to_cpu(ev->interval),
|
|
le16_to_cpu(ev->latency),
|
|
le16_to_cpu(ev->supervision_timeout));
|
|
@@ -5274,7 +5319,8 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
|
if (conn) {
|
|
struct adv_info *adv_instance;
|
|
|
|
- if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
|
|
+ if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
|
|
+ bacmp(&conn->resp_addr, BDADDR_ANY))
|
|
return;
|
|
|
|
if (!hdev->cur_adv_instance) {
|
|
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
|
|
index 600b1832e1dd6..7c24a9acbc459 100644
|
|
--- a/net/bluetooth/sco.c
|
|
+++ b/net/bluetooth/sco.c
|
|
@@ -48,6 +48,8 @@ struct sco_conn {
|
|
spinlock_t lock;
|
|
struct sock *sk;
|
|
|
|
+ struct delayed_work timeout_work;
|
|
+
|
|
unsigned int mtu;
|
|
};
|
|
|
|
@@ -74,9 +76,20 @@ struct sco_pinfo {
|
|
#define SCO_CONN_TIMEOUT (HZ * 40)
|
|
#define SCO_DISCONN_TIMEOUT (HZ * 2)
|
|
|
|
-static void sco_sock_timeout(struct timer_list *t)
|
|
+static void sco_sock_timeout(struct work_struct *work)
|
|
{
|
|
- struct sock *sk = from_timer(sk, t, sk_timer);
|
|
+ struct sco_conn *conn = container_of(work, struct sco_conn,
|
|
+ timeout_work.work);
|
|
+ struct sock *sk;
|
|
+
|
|
+ sco_conn_lock(conn);
|
|
+ sk = conn->sk;
|
|
+ if (sk)
|
|
+ sock_hold(sk);
|
|
+ sco_conn_unlock(conn);
|
|
+
|
|
+ if (!sk)
|
|
+ return;
|
|
|
|
BT_DBG("sock %p state %d", sk, sk->sk_state);
|
|
|
|
@@ -90,14 +103,21 @@ static void sco_sock_timeout(struct timer_list *t)
|
|
|
|
static void sco_sock_set_timer(struct sock *sk, long timeout)
|
|
{
|
|
+ if (!sco_pi(sk)->conn)
|
|
+ return;
|
|
+
|
|
BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
|
|
- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
|
|
+ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
|
|
+ schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout);
|
|
}
|
|
|
|
static void sco_sock_clear_timer(struct sock *sk)
|
|
{
|
|
+ if (!sco_pi(sk)->conn)
|
|
+ return;
|
|
+
|
|
BT_DBG("sock %p state %d", sk, sk->sk_state);
|
|
- sk_stop_timer(sk, &sk->sk_timer);
|
|
+ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
|
|
}
|
|
|
|
/* ---- SCO connections ---- */
|
|
@@ -177,6 +197,9 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
|
|
sco_chan_del(sk, err);
|
|
bh_unlock_sock(sk);
|
|
sock_put(sk);
|
|
+
|
|
+ /* Ensure no more work items will run before freeing conn. */
|
|
+ cancel_delayed_work_sync(&conn->timeout_work);
|
|
}
|
|
|
|
hcon->sco_data = NULL;
|
|
@@ -191,6 +214,8 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
|
|
sco_pi(sk)->conn = conn;
|
|
conn->sk = sk;
|
|
|
|
+ INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
|
|
+
|
|
if (parent)
|
|
bt_accept_enqueue(parent, sk, true);
|
|
}
|
|
@@ -210,44 +235,32 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
|
|
return err;
|
|
}
|
|
|
|
-static int sco_connect(struct sock *sk)
|
|
+static int sco_connect(struct hci_dev *hdev, struct sock *sk)
|
|
{
|
|
struct sco_conn *conn;
|
|
struct hci_conn *hcon;
|
|
- struct hci_dev *hdev;
|
|
int err, type;
|
|
|
|
BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
|
|
|
|
- hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
|
|
- if (!hdev)
|
|
- return -EHOSTUNREACH;
|
|
-
|
|
- hci_dev_lock(hdev);
|
|
-
|
|
if (lmp_esco_capable(hdev) && !disable_esco)
|
|
type = ESCO_LINK;
|
|
else
|
|
type = SCO_LINK;
|
|
|
|
if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
|
|
- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
|
|
- err = -EOPNOTSUPP;
|
|
- goto done;
|
|
- }
|
|
+ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
|
|
+ return -EOPNOTSUPP;
|
|
|
|
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
|
|
sco_pi(sk)->setting);
|
|
- if (IS_ERR(hcon)) {
|
|
- err = PTR_ERR(hcon);
|
|
- goto done;
|
|
- }
|
|
+ if (IS_ERR(hcon))
|
|
+ return PTR_ERR(hcon);
|
|
|
|
conn = sco_conn_add(hcon);
|
|
if (!conn) {
|
|
hci_conn_drop(hcon);
|
|
- err = -ENOMEM;
|
|
- goto done;
|
|
+ return -ENOMEM;
|
|
}
|
|
|
|
/* Update source addr of the socket */
|
|
@@ -255,7 +268,7 @@ static int sco_connect(struct sock *sk)
|
|
|
|
err = sco_chan_add(conn, sk, NULL);
|
|
if (err)
|
|
- goto done;
|
|
+ return err;
|
|
|
|
if (hcon->state == BT_CONNECTED) {
|
|
sco_sock_clear_timer(sk);
|
|
@@ -265,9 +278,6 @@ static int sco_connect(struct sock *sk)
|
|
sco_sock_set_timer(sk, sk->sk_sndtimeo);
|
|
}
|
|
|
|
-done:
|
|
- hci_dev_unlock(hdev);
|
|
- hci_dev_put(hdev);
|
|
return err;
|
|
}
|
|
|
|
@@ -496,8 +506,6 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
|
|
|
|
sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
|
|
|
|
- timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
|
|
-
|
|
bt_sock_link(&sco_sk_list, sk);
|
|
return sk;
|
|
}
|
|
@@ -562,6 +570,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
|
|
{
|
|
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
|
|
struct sock *sk = sock->sk;
|
|
+ struct hci_dev *hdev;
|
|
int err;
|
|
|
|
BT_DBG("sk %p", sk);
|
|
@@ -576,12 +585,19 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
|
|
if (sk->sk_type != SOCK_SEQPACKET)
|
|
return -EINVAL;
|
|
|
|
+ hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
|
|
+ if (!hdev)
|
|
+ return -EHOSTUNREACH;
|
|
+ hci_dev_lock(hdev);
|
|
+
|
|
lock_sock(sk);
|
|
|
|
/* Set destination address and psm */
|
|
bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
|
|
|
|
- err = sco_connect(sk);
|
|
+ err = sco_connect(hdev, sk);
|
|
+ hci_dev_unlock(hdev);
|
|
+ hci_dev_put(hdev);
|
|
if (err)
|
|
goto done;
|
|
|
|
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
|
|
index c52e5ea654e99..813c709c61cfb 100644
|
|
--- a/net/core/flow_dissector.c
|
|
+++ b/net/core/flow_dissector.c
|
|
@@ -1047,8 +1047,10 @@ proto_again:
|
|
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
|
|
target_container);
|
|
|
|
- memcpy(&key_addrs->v4addrs, &iph->saddr,
|
|
- sizeof(key_addrs->v4addrs));
|
|
+ memcpy(&key_addrs->v4addrs.src, &iph->saddr,
|
|
+ sizeof(key_addrs->v4addrs.src));
|
|
+ memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
|
|
+ sizeof(key_addrs->v4addrs.dst));
|
|
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
|
|
}
|
|
|
|
@@ -1092,8 +1094,10 @@ proto_again:
|
|
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
|
|
target_container);
|
|
|
|
- memcpy(&key_addrs->v6addrs, &iph->saddr,
|
|
- sizeof(key_addrs->v6addrs));
|
|
+ memcpy(&key_addrs->v6addrs.src, &iph->saddr,
|
|
+ sizeof(key_addrs->v6addrs.src));
|
|
+ memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
|
|
+ sizeof(key_addrs->v6addrs.dst));
|
|
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
|
|
}
|
|
|
|
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
|
|
index 715b67f6c62f3..e3f0d59068117 100644
|
|
--- a/net/core/flow_offload.c
|
|
+++ b/net/core/flow_offload.c
|
|
@@ -321,6 +321,7 @@ EXPORT_SYMBOL(flow_block_cb_setup_simple);
|
|
static DEFINE_MUTEX(flow_indr_block_lock);
|
|
static LIST_HEAD(flow_block_indr_list);
|
|
static LIST_HEAD(flow_block_indr_dev_list);
|
|
+static LIST_HEAD(flow_indir_dev_list);
|
|
|
|
struct flow_indr_dev {
|
|
struct list_head list;
|
|
@@ -346,6 +347,33 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
|
|
return indr_dev;
|
|
}
|
|
|
|
+struct flow_indir_dev_info {
|
|
+ void *data;
|
|
+ struct net_device *dev;
|
|
+ struct Qdisc *sch;
|
|
+ enum tc_setup_type type;
|
|
+ void (*cleanup)(struct flow_block_cb *block_cb);
|
|
+ struct list_head list;
|
|
+ enum flow_block_command command;
|
|
+ enum flow_block_binder_type binder_type;
|
|
+ struct list_head *cb_list;
|
|
+};
|
|
+
|
|
+static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
|
|
+{
|
|
+ struct flow_block_offload bo;
|
|
+ struct flow_indir_dev_info *cur;
|
|
+
|
|
+ list_for_each_entry(cur, &flow_indir_dev_list, list) {
|
|
+ memset(&bo, 0, sizeof(bo));
|
|
+ bo.command = cur->command;
|
|
+ bo.binder_type = cur->binder_type;
|
|
+ INIT_LIST_HEAD(&bo.cb_list);
|
|
+ cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
|
|
+ list_splice(&bo.cb_list, cur->cb_list);
|
|
+ }
|
|
+}
|
|
+
|
|
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
|
|
{
|
|
struct flow_indr_dev *indr_dev;
|
|
@@ -367,6 +395,7 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
|
|
}
|
|
|
|
list_add(&indr_dev->list, &flow_block_indr_dev_list);
|
|
+ existing_qdiscs_register(cb, cb_priv);
|
|
mutex_unlock(&flow_indr_block_lock);
|
|
|
|
return 0;
|
|
@@ -463,7 +492,59 @@ out:
|
|
}
|
|
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
|
|
|
|
-int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
|
|
+static struct flow_indir_dev_info *find_indir_dev(void *data)
|
|
+{
|
|
+ struct flow_indir_dev_info *cur;
|
|
+
|
|
+ list_for_each_entry(cur, &flow_indir_dev_list, list) {
|
|
+ if (cur->data == data)
|
|
+ return cur;
|
|
+ }
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
|
|
+ enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
|
|
+ struct flow_block_offload *bo)
|
|
+{
|
|
+ struct flow_indir_dev_info *info;
|
|
+
|
|
+ info = find_indir_dev(data);
|
|
+ if (info)
|
|
+ return -EEXIST;
|
|
+
|
|
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
|
|
+ if (!info)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ info->data = data;
|
|
+ info->dev = dev;
|
|
+ info->sch = sch;
|
|
+ info->type = type;
|
|
+ info->cleanup = cleanup;
|
|
+ info->command = bo->command;
|
|
+ info->binder_type = bo->binder_type;
|
|
+ info->cb_list = bo->cb_list_head;
|
|
+
|
|
+ list_add(&info->list, &flow_indir_dev_list);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int indir_dev_remove(void *data)
|
|
+{
|
|
+ struct flow_indir_dev_info *info;
|
|
+
|
|
+ info = find_indir_dev(data);
|
|
+ if (!info)
|
|
+ return -ENOENT;
|
|
+
|
|
+ list_del(&info->list);
|
|
+
|
|
+ kfree(info);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
|
|
enum tc_setup_type type, void *data,
|
|
struct flow_block_offload *bo,
|
|
void (*cleanup)(struct flow_block_cb *block_cb))
|
|
@@ -471,6 +552,12 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
|
|
struct flow_indr_dev *this;
|
|
|
|
mutex_lock(&flow_indr_block_lock);
|
|
+
|
|
+ if (bo->command == FLOW_BLOCK_BIND)
|
|
+ indir_dev_add(data, dev, sch, type, cleanup, bo);
|
|
+ else if (bo->command == FLOW_BLOCK_UNBIND)
|
|
+ indir_dev_remove(data);
|
|
+
|
|
list_for_each_entry(this, &flow_block_indr_dev_list, list)
|
|
this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
|
|
|
|
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
|
|
index 68ff19af195c6..97b402b2d6fbd 100644
|
|
--- a/net/ethtool/ioctl.c
|
|
+++ b/net/ethtool/ioctl.c
|
|
@@ -7,6 +7,7 @@
|
|
* the information ethtool needs.
|
|
*/
|
|
|
|
+#include <linux/compat.h>
|
|
#include <linux/module.h>
|
|
#include <linux/types.h>
|
|
#include <linux/capability.h>
|
|
@@ -807,6 +808,120 @@ out:
|
|
return ret;
|
|
}
|
|
|
|
+static noinline_for_stack int
|
|
+ethtool_rxnfc_copy_from_compat(struct ethtool_rxnfc *rxnfc,
|
|
+ const struct compat_ethtool_rxnfc __user *useraddr,
|
|
+ size_t size)
|
|
+{
|
|
+ struct compat_ethtool_rxnfc crxnfc = {};
|
|
+
|
|
+ /* We expect there to be holes between fs.m_ext and
|
|
+ * fs.ring_cookie and at the end of fs, but nowhere else.
|
|
+ * On non-x86, no conversion should be needed.
|
|
+ */
|
|
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_X86_64) &&
|
|
+ sizeof(struct compat_ethtool_rxnfc) !=
|
|
+ sizeof(struct ethtool_rxnfc));
|
|
+ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
|
|
+ sizeof(useraddr->fs.m_ext) !=
|
|
+ offsetof(struct ethtool_rxnfc, fs.m_ext) +
|
|
+ sizeof(rxnfc->fs.m_ext));
|
|
+ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.location) -
|
|
+ offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
|
|
+ offsetof(struct ethtool_rxnfc, fs.location) -
|
|
+ offsetof(struct ethtool_rxnfc, fs.ring_cookie));
|
|
+
|
|
+ if (copy_from_user(&crxnfc, useraddr, min(size, sizeof(crxnfc))))
|
|
+ return -EFAULT;
|
|
+
|
|
+ *rxnfc = (struct ethtool_rxnfc) {
|
|
+ .cmd = crxnfc.cmd,
|
|
+ .flow_type = crxnfc.flow_type,
|
|
+ .data = crxnfc.data,
|
|
+ .fs = {
|
|
+ .flow_type = crxnfc.fs.flow_type,
|
|
+ .h_u = crxnfc.fs.h_u,
|
|
+ .h_ext = crxnfc.fs.h_ext,
|
|
+ .m_u = crxnfc.fs.m_u,
|
|
+ .m_ext = crxnfc.fs.m_ext,
|
|
+ .ring_cookie = crxnfc.fs.ring_cookie,
|
|
+ .location = crxnfc.fs.location,
|
|
+ },
|
|
+ .rule_cnt = crxnfc.rule_cnt,
|
|
+ };
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ethtool_rxnfc_copy_from_user(struct ethtool_rxnfc *rxnfc,
|
|
+ const void __user *useraddr,
|
|
+ size_t size)
|
|
+{
|
|
+ if (compat_need_64bit_alignment_fixup())
|
|
+ return ethtool_rxnfc_copy_from_compat(rxnfc, useraddr, size);
|
|
+
|
|
+ if (copy_from_user(rxnfc, useraddr, size))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ethtool_rxnfc_copy_to_compat(void __user *useraddr,
|
|
+ const struct ethtool_rxnfc *rxnfc,
|
|
+ size_t size, const u32 *rule_buf)
|
|
+{
|
|
+ struct compat_ethtool_rxnfc crxnfc;
|
|
+
|
|
+ memset(&crxnfc, 0, sizeof(crxnfc));
|
|
+ crxnfc = (struct compat_ethtool_rxnfc) {
|
|
+ .cmd = rxnfc->cmd,
|
|
+ .flow_type = rxnfc->flow_type,
|
|
+ .data = rxnfc->data,
|
|
+ .fs = {
|
|
+ .flow_type = rxnfc->fs.flow_type,
|
|
+ .h_u = rxnfc->fs.h_u,
|
|
+ .h_ext = rxnfc->fs.h_ext,
|
|
+ .m_u = rxnfc->fs.m_u,
|
|
+ .m_ext = rxnfc->fs.m_ext,
|
|
+ .ring_cookie = rxnfc->fs.ring_cookie,
|
|
+ .location = rxnfc->fs.location,
|
|
+ },
|
|
+ .rule_cnt = rxnfc->rule_cnt,
|
|
+ };
|
|
+
|
|
+ if (copy_to_user(useraddr, &crxnfc, min(size, sizeof(crxnfc))))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
|
|
+ const struct ethtool_rxnfc *rxnfc,
|
|
+ size_t size, const u32 *rule_buf)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (compat_need_64bit_alignment_fixup()) {
|
|
+ ret = ethtool_rxnfc_copy_to_compat(useraddr, rxnfc, size,
|
|
+ rule_buf);
|
|
+ useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs);
|
|
+ } else {
|
|
+ ret = copy_to_user(useraddr, &rxnfc, size);
|
|
+ useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
|
|
+ }
|
|
+
|
|
+ if (ret)
|
|
+ return -EFAULT;
|
|
+
|
|
+ if (rule_buf) {
|
|
+ if (copy_to_user(useraddr, rule_buf,
|
|
+ rxnfc->rule_cnt * sizeof(u32)))
|
|
+ return -EFAULT;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
|
|
u32 cmd, void __user *useraddr)
|
|
{
|
|
@@ -825,7 +940,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
|
|
info_size = (offsetof(struct ethtool_rxnfc, data) +
|
|
sizeof(info.data));
|
|
|
|
- if (copy_from_user(&info, useraddr, info_size))
|
|
+ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
|
|
return -EFAULT;
|
|
|
|
rc = dev->ethtool_ops->set_rxnfc(dev, &info);
|
|
@@ -833,7 +948,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
|
|
return rc;
|
|
|
|
if (cmd == ETHTOOL_SRXCLSRLINS &&
|
|
- copy_to_user(useraddr, &info, info_size))
|
|
+ ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL))
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
@@ -859,7 +974,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
|
|
info_size = (offsetof(struct ethtool_rxnfc, data) +
|
|
sizeof(info.data));
|
|
|
|
- if (copy_from_user(&info, useraddr, info_size))
|
|
+ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
|
|
return -EFAULT;
|
|
|
|
/* If FLOW_RSS was requested then user-space must be using the
|
|
@@ -867,7 +982,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
|
|
*/
|
|
if (cmd == ETHTOOL_GRXFH && info.flow_type & FLOW_RSS) {
|
|
info_size = sizeof(info);
|
|
- if (copy_from_user(&info, useraddr, info_size))
|
|
+ if (ethtool_rxnfc_copy_from_user(&info, useraddr, info_size))
|
|
return -EFAULT;
|
|
/* Since malicious users may modify the original data,
|
|
* we need to check whether FLOW_RSS is still requested.
|
|
@@ -893,18 +1008,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
|
|
if (ret < 0)
|
|
goto err_out;
|
|
|
|
- ret = -EFAULT;
|
|
- if (copy_to_user(useraddr, &info, info_size))
|
|
- goto err_out;
|
|
-
|
|
- if (rule_buf) {
|
|
- useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
|
|
- if (copy_to_user(useraddr, rule_buf,
|
|
- info.rule_cnt * sizeof(u32)))
|
|
- goto err_out;
|
|
- }
|
|
- ret = 0;
|
|
-
|
|
+ ret = ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, rule_buf);
|
|
err_out:
|
|
kfree(rule_buf);
|
|
|
|
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
|
|
index 560d5dc435629..10d4cde31c6bf 100644
|
|
--- a/net/ipv4/ip_output.c
|
|
+++ b/net/ipv4/ip_output.c
|
|
@@ -445,8 +445,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
|
|
{
|
|
BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
|
|
offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
|
|
- memcpy(&iph->saddr, &fl4->saddr,
|
|
- sizeof(fl4->saddr) + sizeof(fl4->daddr));
|
|
+
|
|
+ iph->saddr = fl4->saddr;
|
|
+ iph->daddr = fl4->daddr;
|
|
}
|
|
|
|
/* Note: skb->sk can be different from sk, in case of tunnels */
|
|
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
|
|
index d49709ba8e165..1071119843843 100644
|
|
--- a/net/ipv4/tcp_fastopen.c
|
|
+++ b/net/ipv4/tcp_fastopen.c
|
|
@@ -379,8 +379,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
|
|
return NULL;
|
|
}
|
|
|
|
- if (syn_data &&
|
|
- tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
|
|
+ if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
|
|
goto fastopen;
|
|
|
|
if (foc->len == 0) {
|
|
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
|
|
index 30589b4c09da4..3a15ef8dd3228 100644
|
|
--- a/net/mac80211/iface.c
|
|
+++ b/net/mac80211/iface.c
|
|
@@ -2000,9 +2000,16 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
|
|
|
|
netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops);
|
|
|
|
- /* MTU range: 256 - 2304 */
|
|
+ /* MTU range is normally 256 - 2304, where the upper limit is
|
|
+ * the maximum MSDU size. Monitor interfaces send and receive
|
|
+ * MPDU and A-MSDU frames which may be much larger so we do
|
|
+ * not impose an upper limit in that case.
|
|
+ */
|
|
ndev->min_mtu = 256;
|
|
- ndev->max_mtu = local->hw.max_mtu;
|
|
+ if (type == NL80211_IFTYPE_MONITOR)
|
|
+ ndev->max_mtu = 0;
|
|
+ else
|
|
+ ndev->max_mtu = local->hw.max_mtu;
|
|
|
|
ret = register_netdevice(ndev);
|
|
if (ret) {
|
|
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
|
|
index 92047cea3c170..a6b654b028dd4 100644
|
|
--- a/net/netfilter/nf_flow_table_offload.c
|
|
+++ b/net/netfilter/nf_flow_table_offload.c
|
|
@@ -940,6 +940,7 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
|
|
bo->command = cmd;
|
|
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
|
|
bo->extack = extack;
|
|
+ bo->cb_list_head = &flowtable->flow_block.cb_list;
|
|
INIT_LIST_HEAD(&bo->cb_list);
|
|
}
|
|
|
|
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
|
|
index 9ce776175214c..e5fcbb0e4b8e5 100644
|
|
--- a/net/netfilter/nf_tables_offload.c
|
|
+++ b/net/netfilter/nf_tables_offload.c
|
|
@@ -323,6 +323,7 @@ static void nft_flow_block_offload_init(struct flow_block_offload *bo,
|
|
bo->command = cmd;
|
|
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
|
|
bo->extack = extack;
|
|
+ bo->cb_list_head = &basechain->flow_block.cb_list;
|
|
INIT_LIST_HEAD(&bo->cb_list);
|
|
}
|
|
|
|
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
|
|
index 50f40943c8153..f3f1df1b0f8e2 100644
|
|
--- a/net/netlabel/netlabel_cipso_v4.c
|
|
+++ b/net/netlabel/netlabel_cipso_v4.c
|
|
@@ -144,8 +144,8 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
|
|
return -ENOMEM;
|
|
doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL);
|
|
if (doi_def->map.std == NULL) {
|
|
- ret_val = -ENOMEM;
|
|
- goto add_std_failure;
|
|
+ kfree(doi_def);
|
|
+ return -ENOMEM;
|
|
}
|
|
doi_def->type = CIPSO_V4_MAP_TRANS;
|
|
|
|
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
|
|
index e527f5686e2bf..8434da3c0487a 100644
|
|
--- a/net/netlink/af_netlink.c
|
|
+++ b/net/netlink/af_netlink.c
|
|
@@ -2537,13 +2537,15 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
|
|
/* errors reported via destination sk->sk_err, but propagate
|
|
* delivery errors if NETLINK_BROADCAST_ERROR flag is set */
|
|
err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
|
|
+ if (err == -ESRCH)
|
|
+ err = 0;
|
|
}
|
|
|
|
if (report) {
|
|
int err2;
|
|
|
|
err2 = nlmsg_unicast(sk, skb, portid);
|
|
- if (!err || err == -ESRCH)
|
|
+ if (!err)
|
|
err = err2;
|
|
}
|
|
|
|
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
|
|
index 31ac76a9189ee..8073657a0fd25 100644
|
|
--- a/net/sched/cls_api.c
|
|
+++ b/net/sched/cls_api.c
|
|
@@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
|
|
bo->block_shared = shared;
|
|
bo->extack = extack;
|
|
bo->sch = sch;
|
|
+ bo->cb_list_head = &flow_block->cb_list;
|
|
INIT_LIST_HEAD(&bo->cb_list);
|
|
}
|
|
|
|
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
|
|
index 00853065dfa06..cb5e5220da552 100644
|
|
--- a/net/sched/sch_taprio.c
|
|
+++ b/net/sched/sch_taprio.c
|
|
@@ -1502,7 +1502,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
|
|
taprio_set_picos_per_byte(dev, q);
|
|
|
|
if (mqprio) {
|
|
- netdev_set_num_tc(dev, mqprio->num_tc);
|
|
+ err = netdev_set_num_tc(dev, mqprio->num_tc);
|
|
+ if (err)
|
|
+ goto free_sched;
|
|
for (i = 0; i < mqprio->num_tc; i++)
|
|
netdev_set_tc_queue(dev, i,
|
|
mqprio->count[i],
|
|
diff --git a/net/socket.c b/net/socket.c
|
|
index dd5da07bc1ffc..d52c265ad449b 100644
|
|
--- a/net/socket.c
|
|
+++ b/net/socket.c
|
|
@@ -3112,128 +3112,6 @@ static int compat_dev_ifconf(struct net *net, struct compat_ifconf __user *uifc3
|
|
return 0;
|
|
}
|
|
|
|
-static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
|
|
-{
|
|
- struct compat_ethtool_rxnfc __user *compat_rxnfc;
|
|
- bool convert_in = false, convert_out = false;
|
|
- size_t buf_size = 0;
|
|
- struct ethtool_rxnfc __user *rxnfc = NULL;
|
|
- struct ifreq ifr;
|
|
- u32 rule_cnt = 0, actual_rule_cnt;
|
|
- u32 ethcmd;
|
|
- u32 data;
|
|
- int ret;
|
|
-
|
|
- if (get_user(data, &ifr32->ifr_ifru.ifru_data))
|
|
- return -EFAULT;
|
|
-
|
|
- compat_rxnfc = compat_ptr(data);
|
|
-
|
|
- if (get_user(ethcmd, &compat_rxnfc->cmd))
|
|
- return -EFAULT;
|
|
-
|
|
- /* Most ethtool structures are defined without padding.
|
|
- * Unfortunately struct ethtool_rxnfc is an exception.
|
|
- */
|
|
- switch (ethcmd) {
|
|
- default:
|
|
- break;
|
|
- case ETHTOOL_GRXCLSRLALL:
|
|
- /* Buffer size is variable */
|
|
- if (get_user(rule_cnt, &compat_rxnfc->rule_cnt))
|
|
- return -EFAULT;
|
|
- if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
|
|
- return -ENOMEM;
|
|
- buf_size += rule_cnt * sizeof(u32);
|
|
- fallthrough;
|
|
- case ETHTOOL_GRXRINGS:
|
|
- case ETHTOOL_GRXCLSRLCNT:
|
|
- case ETHTOOL_GRXCLSRULE:
|
|
- case ETHTOOL_SRXCLSRLINS:
|
|
- convert_out = true;
|
|
- fallthrough;
|
|
- case ETHTOOL_SRXCLSRLDEL:
|
|
- buf_size += sizeof(struct ethtool_rxnfc);
|
|
- convert_in = true;
|
|
- rxnfc = compat_alloc_user_space(buf_size);
|
|
- break;
|
|
- }
|
|
-
|
|
- if (copy_from_user(&ifr.ifr_name, &ifr32->ifr_name, IFNAMSIZ))
|
|
- return -EFAULT;
|
|
-
|
|
- ifr.ifr_data = convert_in ? rxnfc : (void __user *)compat_rxnfc;
|
|
-
|
|
- if (convert_in) {
|
|
- /* We expect there to be holes between fs.m_ext and
|
|
- * fs.ring_cookie and at the end of fs, but nowhere else.
|
|
- */
|
|
- BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
|
|
- sizeof(compat_rxnfc->fs.m_ext) !=
|
|
- offsetof(struct ethtool_rxnfc, fs.m_ext) +
|
|
- sizeof(rxnfc->fs.m_ext));
|
|
- BUILD_BUG_ON(
|
|
- offsetof(struct compat_ethtool_rxnfc, fs.location) -
|
|
- offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
|
|
- offsetof(struct ethtool_rxnfc, fs.location) -
|
|
- offsetof(struct ethtool_rxnfc, fs.ring_cookie));
|
|
-
|
|
- if (copy_in_user(rxnfc, compat_rxnfc,
|
|
- (void __user *)(&rxnfc->fs.m_ext + 1) -
|
|
- (void __user *)rxnfc) ||
|
|
- copy_in_user(&rxnfc->fs.ring_cookie,
|
|
- &compat_rxnfc->fs.ring_cookie,
|
|
- (void __user *)(&rxnfc->fs.location + 1) -
|
|
- (void __user *)&rxnfc->fs.ring_cookie))
|
|
- return -EFAULT;
|
|
- if (ethcmd == ETHTOOL_GRXCLSRLALL) {
|
|
- if (put_user(rule_cnt, &rxnfc->rule_cnt))
|
|
- return -EFAULT;
|
|
- } else if (copy_in_user(&rxnfc->rule_cnt,
|
|
- &compat_rxnfc->rule_cnt,
|
|
- sizeof(rxnfc->rule_cnt)))
|
|
- return -EFAULT;
|
|
- }
|
|
-
|
|
- ret = dev_ioctl(net, SIOCETHTOOL, &ifr, NULL);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- if (convert_out) {
|
|
- if (copy_in_user(compat_rxnfc, rxnfc,
|
|
- (const void __user *)(&rxnfc->fs.m_ext + 1) -
|
|
- (const void __user *)rxnfc) ||
|
|
- copy_in_user(&compat_rxnfc->fs.ring_cookie,
|
|
- &rxnfc->fs.ring_cookie,
|
|
- (const void __user *)(&rxnfc->fs.location + 1) -
|
|
- (const void __user *)&rxnfc->fs.ring_cookie) ||
|
|
- copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
|
|
- sizeof(rxnfc->rule_cnt)))
|
|
- return -EFAULT;
|
|
-
|
|
- if (ethcmd == ETHTOOL_GRXCLSRLALL) {
|
|
- /* As an optimisation, we only copy the actual
|
|
- * number of rules that the underlying
|
|
- * function returned. Since Mallory might
|
|
- * change the rule count in user memory, we
|
|
- * check that it is less than the rule count
|
|
- * originally given (as the user buffer size),
|
|
- * which has been range-checked.
|
|
- */
|
|
- if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
|
|
- return -EFAULT;
|
|
- if (actual_rule_cnt < rule_cnt)
|
|
- rule_cnt = actual_rule_cnt;
|
|
- if (copy_in_user(&compat_rxnfc->rule_locs[0],
|
|
- &rxnfc->rule_locs[0],
|
|
- rule_cnt * sizeof(u32)))
|
|
- return -EFAULT;
|
|
- }
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
|
|
{
|
|
compat_uptr_t uptr32;
|
|
@@ -3390,8 +3268,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
|
|
return old_bridge_ioctl(argp);
|
|
case SIOCGIFCONF:
|
|
return compat_dev_ifconf(net, argp);
|
|
- case SIOCETHTOOL:
|
|
- return ethtool_ioctl(net, argp);
|
|
case SIOCWANDEV:
|
|
return compat_siocwandev(net, argp);
|
|
case SIOCGIFMAP:
|
|
@@ -3404,6 +3280,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
|
|
return sock->ops->gettstamp(sock, argp, cmd == SIOCGSTAMP_OLD,
|
|
!COMPAT_USE_64BIT_TIME);
|
|
|
|
+ case SIOCETHTOOL:
|
|
case SIOCBONDSLAVEINFOQUERY:
|
|
case SIOCBONDINFOQUERY:
|
|
case SIOCSHWTSTAMP:
|
|
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
index 6dff64374bfe1..e22f2d65457da 100644
|
|
--- a/net/sunrpc/auth_gss/svcauth_gss.c
|
|
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
|
|
@@ -1980,7 +1980,7 @@ gss_svc_init_net(struct net *net)
|
|
goto out2;
|
|
return 0;
|
|
out2:
|
|
- destroy_use_gss_proxy_proc_entry(net);
|
|
+ rsi_cache_destroy_net(net);
|
|
out1:
|
|
rsc_cache_destroy_net(net);
|
|
return rv;
|
|
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
|
|
index 9a50764be9160..8201531ce5d97 100644
|
|
--- a/net/sunrpc/xprt.c
|
|
+++ b/net/sunrpc/xprt.c
|
|
@@ -746,9 +746,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
|
|
/* Try to schedule an autoclose RPC call */
|
|
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
|
|
queue_work(xprtiod_workqueue, &xprt->task_cleanup);
|
|
- else if (xprt->snd_task)
|
|
+ else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
|
|
rpc_wake_up_queued_task_set_status(&xprt->pending,
|
|
- xprt->snd_task, -ENOTCONN);
|
|
+ xprt->snd_task, -ENOTCONN);
|
|
spin_unlock(&xprt->transport_lock);
|
|
}
|
|
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
|
|
@@ -837,12 +837,14 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
|
|
goto out;
|
|
if (xprt->snd_task != task)
|
|
goto out;
|
|
+ set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
|
|
xprt->snd_task = cookie;
|
|
ret = true;
|
|
out:
|
|
spin_unlock(&xprt->transport_lock);
|
|
return ret;
|
|
}
|
|
+EXPORT_SYMBOL_GPL(xprt_lock_connect);
|
|
|
|
void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
|
|
{
|
|
@@ -852,12 +854,14 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
|
|
if (!test_bit(XPRT_LOCKED, &xprt->state))
|
|
goto out;
|
|
xprt->snd_task =NULL;
|
|
+ clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
|
|
xprt->ops->release_xprt(xprt, NULL);
|
|
xprt_schedule_autodisconnect(xprt);
|
|
out:
|
|
spin_unlock(&xprt->transport_lock);
|
|
wake_up_bit(&xprt->state, XPRT_LOCKED);
|
|
}
|
|
+EXPORT_SYMBOL_GPL(xprt_unlock_connect);
|
|
|
|
/**
|
|
* xprt_connect - schedule a transport connect operation
|
|
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
|
|
index c26db0a379967..8e2368a0c2a29 100644
|
|
--- a/net/sunrpc/xprtrdma/transport.c
|
|
+++ b/net/sunrpc/xprtrdma/transport.c
|
|
@@ -249,12 +249,9 @@ xprt_rdma_connect_worker(struct work_struct *work)
|
|
xprt->stat.connect_start;
|
|
xprt_set_connected(xprt);
|
|
rc = -EAGAIN;
|
|
- } else {
|
|
- /* Force a call to xprt_rdma_close to clean up */
|
|
- spin_lock(&xprt->transport_lock);
|
|
- set_bit(XPRT_CLOSE_WAIT, &xprt->state);
|
|
- spin_unlock(&xprt->transport_lock);
|
|
- }
|
|
+ } else
|
|
+ rpcrdma_xprt_disconnect(r_xprt);
|
|
+ xprt_unlock_connect(xprt, r_xprt);
|
|
xprt_wake_pending_tasks(xprt, rc);
|
|
}
|
|
|
|
@@ -487,6 +484,8 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
|
|
struct rpcrdma_ep *ep = r_xprt->rx_ep;
|
|
unsigned long delay;
|
|
|
|
+ WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));
|
|
+
|
|
delay = 0;
|
|
if (ep && ep->re_connect_status != 0) {
|
|
delay = xprt_reconnect_delay(xprt);
|
|
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
|
|
index 9c0f71e82d978..16c7758e7bf30 100644
|
|
--- a/net/sunrpc/xprtsock.c
|
|
+++ b/net/sunrpc/xprtsock.c
|
|
@@ -1639,6 +1639,13 @@ static int xs_get_srcport(struct sock_xprt *transport)
|
|
return port;
|
|
}
|
|
|
|
+unsigned short get_srcport(struct rpc_xprt *xprt)
|
|
+{
|
|
+ struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
|
|
+ return xs_sock_getport(sock->sock);
|
|
+}
|
|
+EXPORT_SYMBOL(get_srcport);
|
|
+
|
|
static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
|
|
{
|
|
if (transport->srcport != 0)
|
|
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
|
|
index 9bd72468bc68e..963047c57c27b 100644
|
|
--- a/net/tipc/socket.c
|
|
+++ b/net/tipc/socket.c
|
|
@@ -1887,6 +1887,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
|
|
bool connected = !tipc_sk_type_connectionless(sk);
|
|
struct tipc_sock *tsk = tipc_sk(sk);
|
|
int rc, err, hlen, dlen, copy;
|
|
+ struct tipc_skb_cb *skb_cb;
|
|
struct sk_buff_head xmitq;
|
|
struct tipc_msg *hdr;
|
|
struct sk_buff *skb;
|
|
@@ -1910,6 +1911,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
|
|
if (unlikely(rc))
|
|
goto exit;
|
|
skb = skb_peek(&sk->sk_receive_queue);
|
|
+ skb_cb = TIPC_SKB_CB(skb);
|
|
hdr = buf_msg(skb);
|
|
dlen = msg_data_sz(hdr);
|
|
hlen = msg_hdr_sz(hdr);
|
|
@@ -1929,18 +1931,33 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
|
|
|
|
/* Capture data if non-error msg, otherwise just set return value */
|
|
if (likely(!err)) {
|
|
- copy = min_t(int, dlen, buflen);
|
|
- if (unlikely(copy != dlen))
|
|
- m->msg_flags |= MSG_TRUNC;
|
|
- rc = skb_copy_datagram_msg(skb, hlen, m, copy);
|
|
+ int offset = skb_cb->bytes_read;
|
|
+
|
|
+ copy = min_t(int, dlen - offset, buflen);
|
|
+ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
|
|
+ if (unlikely(rc))
|
|
+ goto exit;
|
|
+ if (unlikely(offset + copy < dlen)) {
|
|
+ if (flags & MSG_EOR) {
|
|
+ if (!(flags & MSG_PEEK))
|
|
+ skb_cb->bytes_read = offset + copy;
|
|
+ } else {
|
|
+ m->msg_flags |= MSG_TRUNC;
|
|
+ skb_cb->bytes_read = 0;
|
|
+ }
|
|
+ } else {
|
|
+ if (flags & MSG_EOR)
|
|
+ m->msg_flags |= MSG_EOR;
|
|
+ skb_cb->bytes_read = 0;
|
|
+ }
|
|
} else {
|
|
copy = 0;
|
|
rc = 0;
|
|
- if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
|
|
+ if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
|
|
rc = -ECONNRESET;
|
|
+ goto exit;
|
|
+ }
|
|
}
|
|
- if (unlikely(rc))
|
|
- goto exit;
|
|
|
|
/* Mark message as group event if applicable */
|
|
if (unlikely(grp_evt)) {
|
|
@@ -1963,9 +1980,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
|
|
tipc_node_distr_xmit(sock_net(sk), &xmitq);
|
|
}
|
|
|
|
- tsk_advance_rx_queue(sk);
|
|
+ if (!skb_cb->bytes_read)
|
|
+ tsk_advance_rx_queue(sk);
|
|
|
|
- if (likely(!connected))
|
|
+ if (likely(!connected) || skb_cb->bytes_read)
|
|
goto exit;
|
|
|
|
/* Send connection flow control advertisement when applicable */
|
|
diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
|
|
index e68b9ee6814b8..35db26f736b9d 100755
|
|
--- a/samples/bpf/test_override_return.sh
|
|
+++ b/samples/bpf/test_override_return.sh
|
|
@@ -1,5 +1,6 @@
|
|
#!/bin/bash
|
|
|
|
+rm -r tmpmnt
|
|
rm -f testfile.img
|
|
dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
|
|
DEVICE=$(losetup --show -f testfile.img)
|
|
diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
|
|
index fdcd6580dd736..8be7ce18d3ba0 100644
|
|
--- a/samples/bpf/tracex7_user.c
|
|
+++ b/samples/bpf/tracex7_user.c
|
|
@@ -14,6 +14,11 @@ int main(int argc, char **argv)
|
|
int ret = 0;
|
|
FILE *f;
|
|
|
|
+ if (!argv[1]) {
|
|
+ fprintf(stderr, "ERROR: Run with the btrfs device argument!\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
|
|
obj = bpf_object__open_file(filename, NULL);
|
|
if (libbpf_get_error(obj)) {
|
|
diff --git a/scripts/gen_ksymdeps.sh b/scripts/gen_ksymdeps.sh
|
|
index 1324986e1362c..725e8c9c1b53f 100755
|
|
--- a/scripts/gen_ksymdeps.sh
|
|
+++ b/scripts/gen_ksymdeps.sh
|
|
@@ -4,7 +4,13 @@
|
|
set -e
|
|
|
|
# List of exported symbols
|
|
-ksyms=$($NM $1 | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
|
|
+#
|
|
+# If the object has no symbol, $NM warns 'no symbols'.
|
|
+# Suppress the stderr.
|
|
+# TODO:
|
|
+# Use -q instead of 2>/dev/null when we upgrade the minimum version of
|
|
+# binutils to 2.37, llvm to 13.0.0.
|
|
+ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
|
|
|
|
if [ -z "$ksyms" ]; then
|
|
exit 0
|
|
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
|
|
index 7eabb448acab4..169929c6c4eb3 100644
|
|
--- a/security/smack/smack_access.c
|
|
+++ b/security/smack/smack_access.c
|
|
@@ -81,23 +81,22 @@ int log_policy = SMACK_AUDIT_DENIED;
|
|
int smk_access_entry(char *subject_label, char *object_label,
|
|
struct list_head *rule_list)
|
|
{
|
|
- int may = -ENOENT;
|
|
struct smack_rule *srp;
|
|
|
|
list_for_each_entry_rcu(srp, rule_list, list) {
|
|
if (srp->smk_object->smk_known == object_label &&
|
|
srp->smk_subject->smk_known == subject_label) {
|
|
- may = srp->smk_access;
|
|
- break;
|
|
+ int may = srp->smk_access;
|
|
+ /*
|
|
+ * MAY_WRITE implies MAY_LOCK.
|
|
+ */
|
|
+ if ((may & MAY_WRITE) == MAY_WRITE)
|
|
+ may |= MAY_LOCK;
|
|
+ return may;
|
|
}
|
|
}
|
|
|
|
- /*
|
|
- * MAY_WRITE implies MAY_LOCK.
|
|
- */
|
|
- if ((may & MAY_WRITE) == MAY_WRITE)
|
|
- may |= MAY_LOCK;
|
|
- return may;
|
|
+ return -ENOENT;
|
|
}
|
|
|
|
/**
|
|
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
|
|
index 142373ec411ad..89210048e6c2b 100644
|
|
--- a/sound/soc/atmel/Kconfig
|
|
+++ b/sound/soc/atmel/Kconfig
|
|
@@ -11,7 +11,6 @@ if SND_ATMEL_SOC
|
|
|
|
config SND_ATMEL_SOC_PDC
|
|
bool
|
|
- depends on HAS_DMA
|
|
|
|
config SND_ATMEL_SOC_DMA
|
|
bool
|
|
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
|
|
index ca14730232ba9..43ee3d095a1be 100644
|
|
--- a/sound/soc/intel/boards/bytcr_rt5640.c
|
|
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
|
|
@@ -286,9 +286,6 @@ static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
|
|
static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
|
|
{"Headphone", NULL, "Platform Clock"},
|
|
{"Headset Mic", NULL, "Platform Clock"},
|
|
- {"Internal Mic", NULL, "Platform Clock"},
|
|
- {"Speaker", NULL, "Platform Clock"},
|
|
-
|
|
{"Headset Mic", NULL, "MICBIAS1"},
|
|
{"IN2P", NULL, "Headset Mic"},
|
|
{"Headphone", NULL, "HPOL"},
|
|
@@ -296,19 +293,23 @@ static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
|
|
};
|
|
|
|
static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = {
|
|
+ {"Internal Mic", NULL, "Platform Clock"},
|
|
{"DMIC1", NULL, "Internal Mic"},
|
|
};
|
|
|
|
static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic2_map[] = {
|
|
+ {"Internal Mic", NULL, "Platform Clock"},
|
|
{"DMIC2", NULL, "Internal Mic"},
|
|
};
|
|
|
|
static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = {
|
|
+ {"Internal Mic", NULL, "Platform Clock"},
|
|
{"Internal Mic", NULL, "MICBIAS1"},
|
|
{"IN1P", NULL, "Internal Mic"},
|
|
};
|
|
|
|
static const struct snd_soc_dapm_route byt_rt5640_intmic_in3_map[] = {
|
|
+ {"Internal Mic", NULL, "Platform Clock"},
|
|
{"Internal Mic", NULL, "MICBIAS1"},
|
|
{"IN3P", NULL, "Internal Mic"},
|
|
};
|
|
@@ -350,6 +351,7 @@ static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif2_map[] = {
|
|
};
|
|
|
|
static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
|
|
+ {"Speaker", NULL, "Platform Clock"},
|
|
{"Speaker", NULL, "SPOLP"},
|
|
{"Speaker", NULL, "SPOLN"},
|
|
{"Speaker", NULL, "SPORP"},
|
|
@@ -357,6 +359,7 @@ static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
|
|
};
|
|
|
|
static const struct snd_soc_dapm_route byt_rt5640_mono_spk_map[] = {
|
|
+ {"Speaker", NULL, "Platform Clock"},
|
|
{"Speaker", NULL, "SPOLP"},
|
|
{"Speaker", NULL, "SPOLN"},
|
|
};
|
|
diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
|
|
index d2b0456236c72..bdd671f07659c 100644
|
|
--- a/sound/soc/intel/boards/sof_pcm512x.c
|
|
+++ b/sound/soc/intel/boards/sof_pcm512x.c
|
|
@@ -26,11 +26,16 @@
|
|
|
|
#define SOF_PCM512X_SSP_CODEC(quirk) ((quirk) & GENMASK(3, 0))
|
|
#define SOF_PCM512X_SSP_CODEC_MASK (GENMASK(3, 0))
|
|
+#define SOF_PCM512X_ENABLE_SSP_CAPTURE BIT(4)
|
|
+#define SOF_PCM512X_ENABLE_DMIC BIT(5)
|
|
|
|
#define IDISP_CODEC_MASK 0x4
|
|
|
|
/* Default: SSP5 */
|
|
-static unsigned long sof_pcm512x_quirk = SOF_PCM512X_SSP_CODEC(5);
|
|
+static unsigned long sof_pcm512x_quirk =
|
|
+ SOF_PCM512X_SSP_CODEC(5) |
|
|
+ SOF_PCM512X_ENABLE_SSP_CAPTURE |
|
|
+ SOF_PCM512X_ENABLE_DMIC;
|
|
|
|
static bool is_legacy_cpu;
|
|
|
|
@@ -245,8 +250,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
|
|
links[id].dpcm_playback = 1;
|
|
/*
|
|
* capture only supported with specific versions of the Hifiberry DAC+
|
|
- * links[id].dpcm_capture = 1;
|
|
*/
|
|
+ if (sof_pcm512x_quirk & SOF_PCM512X_ENABLE_SSP_CAPTURE)
|
|
+ links[id].dpcm_capture = 1;
|
|
links[id].no_pcm = 1;
|
|
links[id].cpus = &cpus[id];
|
|
links[id].num_cpus = 1;
|
|
@@ -381,6 +387,9 @@ static int sof_audio_probe(struct platform_device *pdev)
|
|
|
|
ssp_codec = sof_pcm512x_quirk & SOF_PCM512X_SSP_CODEC_MASK;
|
|
|
|
+ if (!(sof_pcm512x_quirk & SOF_PCM512X_ENABLE_DMIC))
|
|
+ dmic_be_num = 0;
|
|
+
|
|
/* compute number of dai links */
|
|
sof_audio_card_pcm512x.num_links = 1 + dmic_be_num + hdmi_num;
|
|
|
|
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
|
|
index 476ef1897961d..79c6cf2c14bfb 100644
|
|
--- a/sound/soc/intel/skylake/skl-messages.c
|
|
+++ b/sound/soc/intel/skylake/skl-messages.c
|
|
@@ -802,9 +802,12 @@ static u16 skl_get_module_param_size(struct skl_dev *skl,
|
|
|
|
case SKL_MODULE_TYPE_BASE_OUTFMT:
|
|
case SKL_MODULE_TYPE_MIC_SELECT:
|
|
- case SKL_MODULE_TYPE_KPB:
|
|
return sizeof(struct skl_base_outfmt_cfg);
|
|
|
|
+ case SKL_MODULE_TYPE_MIXER:
|
|
+ case SKL_MODULE_TYPE_KPB:
|
|
+ return sizeof(struct skl_base_cfg);
|
|
+
|
|
default:
|
|
/*
|
|
* return only base cfg when no specific module type is
|
|
@@ -857,10 +860,14 @@ static int skl_set_module_format(struct skl_dev *skl,
|
|
|
|
case SKL_MODULE_TYPE_BASE_OUTFMT:
|
|
case SKL_MODULE_TYPE_MIC_SELECT:
|
|
- case SKL_MODULE_TYPE_KPB:
|
|
skl_set_base_outfmt_format(skl, module_config, *param_data);
|
|
break;
|
|
|
|
+ case SKL_MODULE_TYPE_MIXER:
|
|
+ case SKL_MODULE_TYPE_KPB:
|
|
+ skl_set_base_module_format(skl, module_config, *param_data);
|
|
+ break;
|
|
+
|
|
default:
|
|
skl_set_base_module_format(skl, module_config, *param_data);
|
|
break;
|
|
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
|
|
index bbe8d782e0af6..b1897a057397d 100644
|
|
--- a/sound/soc/intel/skylake/skl-pcm.c
|
|
+++ b/sound/soc/intel/skylake/skl-pcm.c
|
|
@@ -1318,21 +1318,6 @@ static int skl_get_module_info(struct skl_dev *skl,
|
|
return -EIO;
|
|
}
|
|
|
|
- list_for_each_entry(module, &skl->uuid_list, list) {
|
|
- if (guid_equal(uuid_mod, &module->uuid)) {
|
|
- mconfig->id.module_id = module->id;
|
|
- if (mconfig->module)
|
|
- mconfig->module->loadable = module->is_loadable;
|
|
- ret = 0;
|
|
- break;
|
|
- }
|
|
- }
|
|
-
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- uuid_mod = &module->uuid;
|
|
- ret = -EIO;
|
|
for (i = 0; i < skl->nr_modules; i++) {
|
|
skl_module = skl->modules[i];
|
|
uuid_tplg = &skl_module->uuid;
|
|
@@ -1342,10 +1327,18 @@ static int skl_get_module_info(struct skl_dev *skl,
|
|
break;
|
|
}
|
|
}
|
|
+
|
|
if (skl->nr_modules && ret)
|
|
return ret;
|
|
|
|
+ ret = -EIO;
|
|
list_for_each_entry(module, &skl->uuid_list, list) {
|
|
+ if (guid_equal(uuid_mod, &module->uuid)) {
|
|
+ mconfig->id.module_id = module->id;
|
|
+ mconfig->module->loadable = module->is_loadable;
|
|
+ ret = 0;
|
|
+ }
|
|
+
|
|
for (i = 0; i < MAX_IN_QUEUE; i++) {
|
|
pin_id = &mconfig->m_in_pin[i].id;
|
|
if (guid_equal(&pin_id->mod_uuid, &module->uuid))
|
|
@@ -1359,7 +1352,7 @@ static int skl_get_module_info(struct skl_dev *skl,
|
|
}
|
|
}
|
|
|
|
- return 0;
|
|
+ return ret;
|
|
}
|
|
|
|
static int skl_populate_modules(struct skl_dev *skl)
|
|
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
|
|
index 593299675b8c7..fa84ec695b525 100644
|
|
--- a/sound/soc/rockchip/rockchip_i2s.c
|
|
+++ b/sound/soc/rockchip/rockchip_i2s.c
|
|
@@ -186,7 +186,9 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
|
|
{
|
|
struct rk_i2s_dev *i2s = to_info(cpu_dai);
|
|
unsigned int mask = 0, val = 0;
|
|
+ int ret = 0;
|
|
|
|
+ pm_runtime_get_sync(cpu_dai->dev);
|
|
mask = I2S_CKR_MSS_MASK;
|
|
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
|
|
case SND_SOC_DAIFMT_CBS_CFS:
|
|
@@ -199,7 +201,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
|
|
i2s->is_master_mode = false;
|
|
break;
|
|
default:
|
|
- return -EINVAL;
|
|
+ ret = -EINVAL;
|
|
+ goto err_pm_put;
|
|
}
|
|
|
|
regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
|
|
@@ -213,7 +216,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
|
|
val = I2S_CKR_CKP_POS;
|
|
break;
|
|
default:
|
|
- return -EINVAL;
|
|
+ ret = -EINVAL;
|
|
+ goto err_pm_put;
|
|
}
|
|
|
|
regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
|
|
@@ -229,14 +233,15 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
|
|
case SND_SOC_DAIFMT_I2S:
|
|
val = I2S_TXCR_IBM_NORMAL;
|
|
break;
|
|
- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
|
|
- val = I2S_TXCR_TFS_PCM;
|
|
- break;
|
|
- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
|
|
+ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
|
|
val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
|
|
break;
|
|
+ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
|
|
+ val = I2S_TXCR_TFS_PCM;
|
|
+ break;
|
|
default:
|
|
- return -EINVAL;
|
|
+ ret = -EINVAL;
|
|
+ goto err_pm_put;
|
|
}
|
|
|
|
regmap_update_bits(i2s->regmap, I2S_TXCR, mask, val);
|
|
@@ -252,19 +257,23 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
|
|
case SND_SOC_DAIFMT_I2S:
|
|
val = I2S_RXCR_IBM_NORMAL;
|
|
break;
|
|
- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
|
|
- val = I2S_RXCR_TFS_PCM;
|
|
- break;
|
|
- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
|
|
+ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
|
|
val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
|
|
break;
|
|
+ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
|
|
+ val = I2S_RXCR_TFS_PCM;
|
|
+ break;
|
|
default:
|
|
- return -EINVAL;
|
|
+ ret = -EINVAL;
|
|
+ goto err_pm_put;
|
|
}
|
|
|
|
regmap_update_bits(i2s->regmap, I2S_RXCR, mask, val);
|
|
|
|
- return 0;
|
|
+err_pm_put:
|
|
+ pm_runtime_put(cpu_dai->dev);
|
|
+
|
|
+ return ret;
|
|
}
|
|
|
|
static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
|
|
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
|
|
index 28923b776cdc8..b337d6f29098b 100644
|
|
--- a/tools/lib/bpf/libbpf.c
|
|
+++ b/tools/lib/bpf/libbpf.c
|
|
@@ -3613,6 +3613,42 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
|
|
return 0;
|
|
}
|
|
|
|
+static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
|
|
+{
|
|
+ char file[PATH_MAX], buff[4096];
|
|
+ FILE *fp;
|
|
+ __u32 val;
|
|
+ int err;
|
|
+
|
|
+ snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
|
|
+ memset(info, 0, sizeof(*info));
|
|
+
|
|
+ fp = fopen(file, "r");
|
|
+ if (!fp) {
|
|
+ err = -errno;
|
|
+ pr_warn("failed to open %s: %d. No procfs support?\n", file,
|
|
+ err);
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ while (fgets(buff, sizeof(buff), fp)) {
|
|
+ if (sscanf(buff, "map_type:\t%u", &val) == 1)
|
|
+ info->type = val;
|
|
+ else if (sscanf(buff, "key_size:\t%u", &val) == 1)
|
|
+ info->key_size = val;
|
|
+ else if (sscanf(buff, "value_size:\t%u", &val) == 1)
|
|
+ info->value_size = val;
|
|
+ else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
|
|
+ info->max_entries = val;
|
|
+ else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
|
|
+ info->map_flags = val;
|
|
+ }
|
|
+
|
|
+ fclose(fp);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
|
|
{
|
|
struct bpf_map_info info = {};
|
|
@@ -3621,6 +3657,8 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
|
|
char *new_name;
|
|
|
|
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
|
+ if (err && errno == EINVAL)
|
|
+ err = bpf_get_map_info_from_fdinfo(fd, &info);
|
|
if (err)
|
|
return err;
|
|
|
|
@@ -4032,12 +4070,16 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
|
|
struct bpf_map_info map_info = {};
|
|
char msg[STRERR_BUFSIZE];
|
|
__u32 map_info_len;
|
|
+ int err;
|
|
|
|
map_info_len = sizeof(map_info);
|
|
|
|
- if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
|
|
- pr_warn("failed to get map info for map FD %d: %s\n",
|
|
- map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
|
|
+ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
|
|
+ if (err && errno == EINVAL)
|
|
+ err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
|
|
+ if (err) {
|
|
+ pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
|
|
+ libbpf_strerror_r(errno, msg, sizeof(msg)));
|
|
return false;
|
|
}
|
|
|
|
@@ -4242,10 +4284,13 @@ bpf_object__create_maps(struct bpf_object *obj)
|
|
char *cp, errmsg[STRERR_BUFSIZE];
|
|
unsigned int i, j;
|
|
int err;
|
|
+ bool retried;
|
|
|
|
for (i = 0; i < obj->nr_maps; i++) {
|
|
map = &obj->maps[i];
|
|
|
|
+ retried = false;
|
|
+retry:
|
|
if (map->pin_path) {
|
|
err = bpf_object__reuse_map(map);
|
|
if (err) {
|
|
@@ -4253,6 +4298,12 @@ bpf_object__create_maps(struct bpf_object *obj)
|
|
map->name);
|
|
goto err_out;
|
|
}
|
|
+ if (retried && map->fd < 0) {
|
|
+ pr_warn("map '%s': cannot find pinned map\n",
|
|
+ map->name);
|
|
+ err = -ENOENT;
|
|
+ goto err_out;
|
|
+ }
|
|
}
|
|
|
|
if (map->fd >= 0) {
|
|
@@ -4286,9 +4337,13 @@ bpf_object__create_maps(struct bpf_object *obj)
|
|
if (map->pin_path && !map->pinned) {
|
|
err = bpf_map__pin(map, NULL);
|
|
if (err) {
|
|
+ zclose(map->fd);
|
|
+ if (!retried && err == -EEXIST) {
|
|
+ retried = true;
|
|
+ goto retry;
|
|
+ }
|
|
pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
|
|
map->name, map->pin_path, err);
|
|
- zclose(map->fd);
|
|
goto err_out;
|
|
}
|
|
}
|
|
diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
|
|
index 70665ba88cbb1..2703bd628d06c 100644
|
|
--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
|
|
+++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
|
|
@@ -285,7 +285,7 @@ int mte_default_setup(void)
|
|
int ret;
|
|
|
|
if (!(hwcaps2 & HWCAP2_MTE)) {
|
|
- ksft_print_msg("FAIL: MTE features unavailable\n");
|
|
+ ksft_print_msg("SKIP: MTE features unavailable\n");
|
|
return KSFT_SKIP;
|
|
}
|
|
/* Get current mte mode */
|
|
diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c
|
|
index 592fe538506e3..b743daa772f55 100644
|
|
--- a/tools/testing/selftests/arm64/pauth/pac.c
|
|
+++ b/tools/testing/selftests/arm64/pauth/pac.c
|
|
@@ -25,13 +25,15 @@
|
|
do { \
|
|
unsigned long hwcaps = getauxval(AT_HWCAP); \
|
|
/* data key instructions are not in NOP space. This prevents a SIGILL */ \
|
|
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled"); \
|
|
+ if (!(hwcaps & HWCAP_PACA)) \
|
|
+ SKIP(return, "PAUTH not enabled"); \
|
|
} while (0)
|
|
#define ASSERT_GENERIC_PAUTH_ENABLED() \
|
|
do { \
|
|
unsigned long hwcaps = getauxval(AT_HWCAP); \
|
|
/* generic key instructions are not in NOP space. This prevents a SIGILL */ \
|
|
- ASSERT_NE(0, hwcaps & HWCAP_PACG) TH_LOG("Generic PAUTH not enabled"); \
|
|
+ if (!(hwcaps & HWCAP_PACG)) \
|
|
+ SKIP(return, "Generic PAUTH not enabled"); \
|
|
} while (0)
|
|
|
|
void sign_specific(struct signatures *sign, size_t val)
|
|
@@ -256,7 +258,7 @@ TEST(single_thread_different_keys)
|
|
unsigned long hwcaps = getauxval(AT_HWCAP);
|
|
|
|
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
|
|
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
|
|
+ ASSERT_PAUTH_ENABLED();
|
|
if (!(hwcaps & HWCAP_PACG)) {
|
|
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
|
|
nkeys = NKEYS - 1;
|
|
@@ -299,7 +301,7 @@ TEST(exec_changed_keys)
|
|
unsigned long hwcaps = getauxval(AT_HWCAP);
|
|
|
|
/* generic and data key instructions are not in NOP space. This prevents a SIGILL */
|
|
- ASSERT_NE(0, hwcaps & HWCAP_PACA) TH_LOG("PAUTH not enabled");
|
|
+ ASSERT_PAUTH_ENABLED();
|
|
if (!(hwcaps & HWCAP_PACG)) {
|
|
TH_LOG("WARNING: Generic PAUTH not enabled. Skipping generic key checks");
|
|
nkeys = NKEYS - 1;
|
|
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
|
|
index 7043e6ded0e60..75b72c751772b 100644
|
|
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
|
|
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
|
|
@@ -1,5 +1,7 @@
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
#include <test_progs.h>
|
|
+#include <sys/time.h>
|
|
+#include <sys/resource.h>
|
|
#include "test_send_signal_kern.skel.h"
|
|
|
|
static volatile int sigusr1_received = 0;
|
|
@@ -41,12 +43,23 @@ static void test_send_signal_common(struct perf_event_attr *attr,
|
|
}
|
|
|
|
if (pid == 0) {
|
|
+ int old_prio;
|
|
+
|
|
/* install signal handler and notify parent */
|
|
signal(SIGUSR1, sigusr1_handler);
|
|
|
|
close(pipe_c2p[0]); /* close read */
|
|
close(pipe_p2c[1]); /* close write */
|
|
|
|
+ /* boost with a high priority so we got a higher chance
|
|
+ * that if an interrupt happens, the underlying task
|
|
+ * is this process.
|
|
+ */
|
|
+ errno = 0;
|
|
+ old_prio = getpriority(PRIO_PROCESS, 0);
|
|
+ ASSERT_OK(errno, "getpriority");
|
|
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
|
|
+
|
|
/* notify parent signal handler is installed */
|
|
CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
|
|
|
|
@@ -62,6 +75,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
|
|
/* wait for parent notification and exit */
|
|
CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
|
|
|
|
+ /* restore the old priority */
|
|
+ ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
|
|
+
|
|
close(pipe_c2p[1]);
|
|
close(pipe_p2c[0]);
|
|
exit(0);
|
|
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
|
|
index ec281b0363b82..86f97681ad898 100644
|
|
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
|
|
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
|
|
@@ -195,8 +195,10 @@ static void run_test(int cgroup_fd)
|
|
|
|
pthread_mutex_lock(&server_started_mtx);
|
|
if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
|
|
- (void *)&server_fd)))
|
|
+ (void *)&server_fd))) {
|
|
+ pthread_mutex_unlock(&server_started_mtx);
|
|
goto close_server_fd;
|
|
+ }
|
|
pthread_cond_wait(&server_started, &server_started_mtx);
|
|
pthread_mutex_unlock(&server_started_mtx);
|
|
|
|
diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
|
|
index 94e6c2b281cb6..5f725c720e008 100644
|
|
--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
|
|
+++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
|
|
@@ -3,7 +3,7 @@
|
|
#include <linux/bpf.h>
|
|
#include <bpf/bpf_helpers.h>
|
|
|
|
-SEC("tx")
|
|
+SEC("xdp")
|
|
int xdp_tx(struct xdp_md *xdp)
|
|
{
|
|
return XDP_TX;
|
|
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
|
|
index 0d92ebcb335d1..179e680e8d134 100644
|
|
--- a/tools/testing/selftests/bpf/test_maps.c
|
|
+++ b/tools/testing/selftests/bpf/test_maps.c
|
|
@@ -968,7 +968,7 @@ static void test_sockmap(unsigned int tasks, void *data)
|
|
|
|
FD_ZERO(&w);
|
|
FD_SET(sfd[3], &w);
|
|
- to.tv_sec = 1;
|
|
+ to.tv_sec = 30;
|
|
to.tv_usec = 0;
|
|
s = select(sfd[3] + 1, &w, NULL, NULL, &to);
|
|
if (s == -1) {
|
|
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
|
|
index ba8ffcdaac302..995278e684b6e 100755
|
|
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
|
|
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
|
|
@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
|
|
ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
|
|
|
|
ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
|
|
-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
|
|
+ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
|
|
ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
|
|
|
|
trap cleanup EXIT
|
|
diff --git a/tools/testing/selftests/firmware/fw_namespace.c b/tools/testing/selftests/firmware/fw_namespace.c
|
|
index 5ebc1aec7923b..817b2f1e8ee6a 100644
|
|
--- a/tools/testing/selftests/firmware/fw_namespace.c
|
|
+++ b/tools/testing/selftests/firmware/fw_namespace.c
|
|
@@ -129,7 +129,8 @@ int main(int argc, char **argv)
|
|
die("mounting tmpfs to /lib/firmware failed\n");
|
|
|
|
sys_path = argv[1];
|
|
- asprintf(&fw_path, "/lib/firmware/%s", fw_name);
|
|
+ if (asprintf(&fw_path, "/lib/firmware/%s", fw_name) < 0)
|
|
+ die("error: failed to build full fw_path\n");
|
|
|
|
setup_fw(fw_path);
|
|
|
|
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
|
|
index a6fac927ee82f..0cee6b067a374 100644
|
|
--- a/tools/testing/selftests/ftrace/test.d/functions
|
|
+++ b/tools/testing/selftests/ftrace/test.d/functions
|
|
@@ -115,7 +115,7 @@ check_requires() { # Check required files and tracers
|
|
echo "Required tracer $t is not configured."
|
|
exit_unsupported
|
|
fi
|
|
- elif [ $r != $i ]; then
|
|
+ elif [ "$r" != "$i" ]; then
|
|
if ! grep -Fq "$r" README ; then
|
|
echo "Required feature pattern \"$r\" is not in README."
|
|
exit_unsupported
|
|
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
|
|
index 59e417ec3e134..25d7f8f37cfd6 100644
|
|
--- a/tools/thermal/tmon/Makefile
|
|
+++ b/tools/thermal/tmon/Makefile
|
|
@@ -10,7 +10,7 @@ override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
|
|
# Add "-fstack-protector" only if toolchain supports it.
|
|
override CFLAGS+= $(call cc-option,-fstack-protector-strong)
|
|
CC?= $(CROSS_COMPILE)gcc
|
|
-PKG_CONFIG?= pkg-config
|
|
+PKG_CONFIG?= $(CROSS_COMPILE)pkg-config
|
|
|
|
override CFLAGS+=-D VERSION=\"$(VERSION)\"
|
|
LDFLAGS+=
|