diff --git a/config/boards/orangepipc2.conf b/config/boards/orangepipc2.conf index d6216fb2b..ef0cc3bb1 100644 --- a/config/boards/orangepipc2.conf +++ b/config/boards/orangepipc2.conf @@ -4,8 +4,6 @@ BOARDFAMILY="sun50iw2" BOOTCONFIG="orangepi_pc2_defconfig" MODULES="" MODULES_NEXT="" -CPUMIN="120000" -CPUMAX="1400000" # KERNEL_TARGET="next,dev" CLI_TARGET="stretch,bionic:next" diff --git a/config/boards/orangepiprime.conf b/config/boards/orangepiprime.conf index 8f06da092..e99e072b0 100644 --- a/config/boards/orangepiprime.conf +++ b/config/boards/orangepiprime.conf @@ -4,8 +4,6 @@ BOARDFAMILY="sun50iw2" BOOTCONFIG="orangepi_prime_defconfig" MODULES="" MODULES_NEXT="" -CPUMIN="120000" -CPUMAX="1400000" # KERNEL_TARGET="next,dev" CLI_TARGET="stretch,bionic:next" diff --git a/config/kernel/linux-sunxi64-dev.config b/config/kernel/linux-sunxi64-dev.config index f48c56c22..b1ecf07cf 100644 --- a/config/kernel/linux-sunxi64-dev.config +++ b/config/kernel/linux-sunxi64-dev.config @@ -1,14 +1,15 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.20.12 Kernel Configuration +# Linux/arm64 5.0.10 Kernel Configuration # # -# Compiler: aarch64-linux-gnu-gcc (Linaro GCC 7.2-2017.11) 7.2.1 20171011 +# Compiler: aarch64-linux-gnu-gcc (Linaro GCC 7.4-2019.02) 7.4.1 20181213 [linaro-7.4-2019.02 revision 56ec6f6b99cc167ff0c2f8e1a2eed33b1edc85d4] # CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=70201 +CONFIG_GCC_VERSION=70401 CONFIG_CLANG_VERSION=0 +CONFIG_CC_HAS_ASM_GOTO=y CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y CONFIG_THREAD_INFO_IN_TASK=y @@ -32,8 +33,6 @@ CONFIG_CROSS_MEMORY_ATTACH=y CONFIG_AUDIT=y CONFIG_HAVE_ARCH_AUDITSYSCALL=y CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y # # IRQ subsystem @@ -223,6 +222,7 @@ CONFIG_GENERIC_CSUM=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_ZONE_DMA32=y CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y CONFIG_SMP=y CONFIG_KERNEL_MODE_NEON=y CONFIG_FIX_EARLYCON_MEM=y @@ -248,6 +248,7 @@ CONFIG_ARCH_SUNXI=y # CONFIG_ARCH_MEDIATEK is not set # CONFIG_ARCH_MESON is not set # CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_MXC is not set # CONFIG_ARCH_QCOM is not set # CONFIG_ARCH_REALTEK is not set # CONFIG_ARCH_RENESAS is not set @@ -265,16 +266,6 @@ CONFIG_ARCH_SUNXI=y # CONFIG_ARCH_ZX is not set # CONFIG_ARCH_ZYNQMP is not set -# -# Bus support -# -# CONFIG_PCI is not set - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set - # # Kernel Features # @@ -282,6 +273,7 @@ CONFIG_ARCH_SUNXI=y # # ARM errata workarounds via the alternatives framework # +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y CONFIG_ARM64_ERRATUM_826319=y CONFIG_ARM64_ERRATUM_827319=y CONFIG_ARM64_ERRATUM_824069=y @@ -291,6 +283,7 @@ CONFIG_ARM64_ERRATUM_845719=y CONFIG_ARM64_ERRATUM_843419=y CONFIG_ARM64_ERRATUM_1024718=y CONFIG_ARM64_ERRATUM_1188873=y +CONFIG_ARM64_ERRATUM_1165522=y CONFIG_ARM64_ERRATUM_1286807=y # CONFIG_CAVIUM_ERRATUM_22375 is not set CONFIG_CAVIUM_ERRATUM_23144=y @@ -342,6 +335,7 @@ CONFIG_SECCOMP=y CONFIG_PARAVIRT=y # CONFIG_PARAVIRT_TIME_ACCOUNTING is not set # CONFIG_KEXEC is not set +# CONFIG_KEXEC_FILE is not set # CONFIG_CRASH_DUMP is not set # CONFIG_XEN is not set CONFIG_FORCE_MAX_ZONEORDER=11 @@ -349,6 +343,7 @@ CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_HARDEN_BRANCH_PREDICTOR=y CONFIG_HARDEN_EL2_VECTORS=y CONFIG_ARM64_SSBD=y +CONFIG_RODATA_FULL_DEFAULT_ENABLED=y # CONFIG_ARMV8_DEPRECATED is not set # CONFIG_ARM64_SW_TTBR0_PAN is not set @@ -367,6 +362,11 @@ CONFIG_ARM64_UAO=y # CONFIG_ARM64_PMEM is not set CONFIG_ARM64_RAS_EXTN=y 
CONFIG_ARM64_CNP=y + +# +# ARMv8.3 architectural features +# +CONFIG_ARM64_PTR_AUTH=y CONFIG_ARM64_SVE=y CONFIG_ARM64_MODULE_PLTS=y # CONFIG_RANDOMIZE_BASE is not set @@ -398,6 +398,7 @@ CONFIG_PM_GENERIC_DOMAINS=y CONFIG_PM_GENERIC_DOMAINS_SLEEP=y CONFIG_PM_GENERIC_DOMAINS_OF=y CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -458,6 +459,7 @@ CONFIG_ARM_SCMI_POWER_DOMAIN=y CONFIG_ARM_SCPI_PROTOCOL=y CONFIG_ARM_SCPI_POWER_DOMAIN=y # CONFIG_ARM_SDE_INTERFACE is not set +# CONFIG_INTEL_STRATIX10_SERVICE is not set CONFIG_HAVE_ARM_SMCCC=y # CONFIG_GOOGLE_FIRMWARE is not set @@ -481,6 +483,7 @@ CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y CONFIG_CRYPTO_CHACHA20_NEON=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set CONFIG_CRYPTO_AES_ARM64_BS=y # @@ -577,7 +580,6 @@ CONFIG_BLK_DEV_THROTTLING=y # CONFIG_BLK_CMDLINE_PARSER is not set CONFIG_BLK_WBT=y # CONFIG_BLK_CGROUP_IOLATENCY is not set -CONFIG_BLK_WBT_SQ=y CONFIG_BLK_WBT_MQ=y # CONFIG_BLK_DEBUG_FS is not set # CONFIG_BLK_SED_OPAL is not set @@ -595,14 +597,6 @@ CONFIG_BLK_PM=y # # IO Schedulers # -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" CONFIG_MQ_IOSCHED_DEADLINE=y CONFIG_MQ_IOSCHED_KYBER=y CONFIG_IOSCHED_BFQ=y @@ -695,6 +689,7 @@ CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y CONFIG_SPARSEMEM_VMEMMAP=y CONFIG_HAVE_MEMBLOCK_NODE_MAP=y CONFIG_MEMORY_ISOLATION=y +# CONFIG_MEMORY_HOTPLUG is not set CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MEMORY_BALLOON=y CONFIG_BALLOON_COMPACTION=y @@ -731,6 +726,7 @@ CONFIG_ARCH_HAS_PTE_SPECIAL=y CONFIG_NET=y CONFIG_COMPAT_NETLINK_MESSAGES=y CONFIG_NET_INGRESS=y +CONFIG_SKB_EXTENSIONS=y # # Networking options @@ -898,9 +894,6 @@ CONFIG_NF_CT_NETLINK_HELPER=m CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_NAT=m CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y CONFIG_NF_NAT_AMANDA=m CONFIG_NF_NAT_FTP=m CONFIG_NF_NAT_IRC=m @@ -1120,7 +1113,6 @@ CONFIG_NFT_CHAIN_NAT_IPV4=m CONFIG_NFT_MASQ_IPV4=m CONFIG_NFT_REDIR_IPV4=m CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_GRE=m CONFIG_NF_NAT_PPTP=m CONFIG_NF_NAT_H323=m CONFIG_IP_NF_IPTABLES=y @@ -1231,6 +1223,8 @@ CONFIG_HAVE_NET_DSA=y CONFIG_NET_DSA=m CONFIG_NET_DSA_LEGACY=y CONFIG_NET_DSA_TAG_GSWIP=y +CONFIG_NET_DSA_TAG_KSZ=y +CONFIG_NET_DSA_TAG_KSZ9477=y CONFIG_VLAN_8021Q=y CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y @@ -1395,6 +1389,7 @@ CONFIG_CAN_VXCAN=m CONFIG_CAN_SLCAN=m CONFIG_CAN_DEV=m CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_FLEXCAN is not set # CONFIG_CAN_GRCAN is not set # CONFIG_CAN_XILINXCAN is not set # CONFIG_CAN_C_CAN is not set @@ -1532,6 +1527,9 @@ CONFIG_HAVE_EBPF_JIT=y # Device Drivers # CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +# CONFIG_PCI is not set +# CONFIG_PCCARD is not set # # Generic Driver Options @@ -1592,7 +1590,6 @@ CONFIG_GNSS_SIRF_SERIAL=m CONFIG_GNSS_UBX_SERIAL=m CONFIG_MTD=y # CONFIG_MTD_TESTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set # CONFIG_MTD_CMDLINE_PARTS is not set # CONFIG_MTD_AFS_PARTS is not set CONFIG_MTD_OF_PARTS=y @@ -1601,6 +1598,7 @@ CONFIG_MTD_OF_PARTS=y # # Partition parsers # +# CONFIG_MTD_REDBOOT_PARTS is not set # # User Modules And Translation Layers @@ -1642,7 +1640,6 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set # CONFIG_MTD_PHYSMAP is not 
set -# CONFIG_MTD_PHYSMAP_OF is not set # CONFIG_MTD_PLATRAM is not set # @@ -1734,6 +1731,7 @@ CONFIG_ATA_OVER_ETH=m # CONFIG_USB_SWITCH_FSA9480 is not set # CONFIG_LATTICE_ECP3_CONFIG is not set # CONFIG_SRAM is not set +CONFIG_PVPANIC=m # CONFIG_C2PORT is not set # @@ -1801,7 +1799,6 @@ CONFIG_SCSI_MOD=y # CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y CONFIG_SCSI_DMA=y -# CONFIG_SCSI_MQ_DEFAULT is not set # CONFIG_SCSI_PROC_FS is not set # @@ -1835,7 +1832,6 @@ CONFIG_SCSI_UFSHCD=m # CONFIG_SCSI_UFS_BSG is not set # CONFIG_SCSI_DEBUG is not set # CONFIG_SCSI_VIRTIO is not set -# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set # CONFIG_SCSI_DH is not set # CONFIG_SCSI_OSD_INITIATOR is not set CONFIG_HAVE_PATA_PLATFORM=y @@ -1926,7 +1922,9 @@ CONFIG_NLMON=m CONFIG_NET_DSA_LANTIQ_GSWIP=m # CONFIG_NET_DSA_MT7530 is not set # CONFIG_NET_DSA_MV88E6060 is not set -# CONFIG_MICROCHIP_KSZ is not set +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m # CONFIG_NET_DSA_MV88E6XXX is not set # CONFIG_NET_DSA_QCA8K is not set CONFIG_NET_DSA_REALTEK_SMI=m @@ -2008,7 +2006,7 @@ CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y # CONFIG_MDIO_BCM_UNIMAC is not set CONFIG_MDIO_BITBANG=y -CONFIG_MDIO_BUS_MUX=y +CONFIG_MDIO_BUS_MUX=m # CONFIG_MDIO_BUS_MUX_GPIO is not set # CONFIG_MDIO_BUS_MUX_MMIOREG is not set # CONFIG_MDIO_GPIO is not set @@ -2118,6 +2116,7 @@ CONFIG_USB_IPHETH=m CONFIG_USB_SIERRA_NET=m CONFIG_USB_VL600=m CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set CONFIG_WLAN=y CONFIG_WLAN_VENDOR_ADMTEK=y CONFIG_RTL8189ES=m @@ -2234,6 +2233,7 @@ CONFIG_RTL8812AU=m CONFIG_WLAN_VENDOR_QUANTENNA=y # CONFIG_MAC80211_HWSIM is not set CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_VIRT_WIFI=m # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -2466,6 +2466,7 @@ CONFIG_SERIO_LIBPS2=y # CONFIG_SERIO_PS2MULT is not set # CONFIG_SERIO_ARC_PS2 is not set # CONFIG_SERIO_APBPS2 is not set +CONFIG_SERIO_OLPC_APSP=m # CONFIG_SERIO_SUN4I_PS2 is not set CONFIG_SERIO_GPIO_PS2=m # CONFIG_USERIO is not set @@ -2487,6 +2488,7 @@ CONFIG_LEGACY_PTY_COUNT=16 # CONFIG_SERIAL_NONSTANDARD is not set # CONFIG_N_GSM is not set # CONFIG_TRACE_SINK is not set +CONFIG_LDISC_AUTOLOAD=y CONFIG_DEVMEM=y # @@ -2538,11 +2540,6 @@ CONFIG_HVC_DRIVER=y CONFIG_VIRTIO_CONSOLE=y # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set -# CONFIG_R3964 is not set - -# -# PCMCIA character devices -# # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set # CONFIG_XILLYBUS is not set @@ -2613,6 +2610,9 @@ CONFIG_I2C_SLAVE=y # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set +CONFIG_I3C=m +CONFIG_CDNS_I3C_MASTER=m +CONFIG_DW_I3C_MASTER=m CONFIG_SPI=y CONFIG_SPI_MASTER=y CONFIG_SPI_MEM=y @@ -2633,6 +2633,7 @@ CONFIG_SPI_PL022=y # CONFIG_SPI_SC18IS602 is not set CONFIG_SPI_SUN4I=y CONFIG_SPI_SUN6I=y +CONFIG_SPI_MXIC=m # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set # CONFIG_SPI_ZYNQMP_GQSPI is not set @@ -2678,6 +2679,7 @@ CONFIG_PINCTRL_AXP209=m # CONFIG_PINCTRL_MCP23S08 is not set CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_SX150X is not set +# CONFIG_PINCTRL_OCELOT is not set CONFIG_PINCTRL_SUNXI=y CONFIG_PINCTRL_SUN8I_H3_R=y CONFIG_PINCTRL_SUN50I_A64=y @@ -2698,6 +2700,7 @@ CONFIG_GPIO_GENERIC=y # # CONFIG_GPIO_74XX_MMIO is not set # CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_CADENCE=m CONFIG_GPIO_DWAPB=y # CONFIG_GPIO_FTGPIO010 is not set CONFIG_GPIO_GENERIC_PLATFORM=y @@ -2706,7 +2709,8 @@ CONFIG_GPIO_HLWD=m # 
CONFIG_GPIO_MB86S7X is not set # CONFIG_GPIO_MOCKUP is not set # CONFIG_GPIO_PL061 is not set -# CONFIG_GPIO_SYSCON is not set +CONFIG_GPIO_SAMA5D2_PIOBU=m +CONFIG_GPIO_SYSCON=m # CONFIG_GPIO_XGENE is not set # CONFIG_GPIO_XILINX is not set @@ -2910,6 +2914,8 @@ CONFIG_SENSORS_NCT6775=m CONFIG_SENSORS_NCT7802=m CONFIG_SENSORS_NCT7904=m CONFIG_SENSORS_NPCM7XX=m +CONFIG_SENSORS_OCC_P8_I2C=m +CONFIG_SENSORS_OCC=y CONFIG_SENSORS_PCF8591=m # CONFIG_PMBUS is not set CONFIG_SENSORS_PWM_FAN=m @@ -2976,13 +2982,8 @@ CONFIG_CLOCK_THERMAL=y CONFIG_DEVFREQ_THERMAL=y CONFIG_THERMAL_EMULATION=y # CONFIG_QORIQ_THERMAL is not set - -# -# ACPI INT340X thermal drivers -# CONFIG_SUN50I_H6_THS=m CONFIG_SUN8I_THS=m -# CONFIG_QCOM_SPMI_TEMP_ALARM is not set CONFIG_GENERIC_ADC_THERMAL=m CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y @@ -3039,7 +3040,6 @@ CONFIG_MFD_SUN4I_GPADC=y # CONFIG_MFD_AS3722 is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_AAT2870_CORE is not set -CONFIG_MFD_AT91_USART=m # CONFIG_MFD_ATMEL_FLEXCOM is not set # CONFIG_MFD_ATMEL_HLCDC is not set # CONFIG_MFD_BCM590XX is not set @@ -3163,6 +3163,7 @@ CONFIG_REGULATOR_GPIO=y # CONFIG_REGULATOR_MAX8660 is not set # CONFIG_REGULATOR_MAX8952 is not set # CONFIG_REGULATOR_MAX8973 is not set +CONFIG_REGULATOR_MCP16502=m # CONFIG_REGULATOR_MT6311 is not set # CONFIG_REGULATOR_PFUZE100 is not set # CONFIG_REGULATOR_PV88060 is not set @@ -3217,6 +3218,7 @@ CONFIG_IR_PWM_TX=m CONFIG_IR_SUNXI=m # CONFIG_IR_SERIAL is not set # CONFIG_IR_SIR is not set +CONFIG_RC_XBOX_DVD=m CONFIG_MEDIA_SUPPORT=m # @@ -3411,6 +3413,7 @@ CONFIG_USB_HACKRF=m CONFIG_USB_MSI2500=m CONFIG_V4L_PLATFORM_DRIVERS=y # CONFIG_VIDEO_CADENCE is not set +CONFIG_VIDEO_ASPEED=m CONFIG_VIDEO_MUX=m # CONFIG_SOC_CAMERA is not set CONFIG_VIDEO_XILINX=m @@ -3623,6 +3626,7 @@ CONFIG_DVB_TDA10048=m CONFIG_DVB_AF9013=m CONFIG_DVB_EC100=m CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m CONFIG_DVB_RTL2830=m CONFIG_DVB_RTL2832=m CONFIG_DVB_RTL2832_SDR=m @@ -3708,10 +3712,8 @@ CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_FBDEV_OVERALLOC=100 CONFIG_DRM_LOAD_EDID_FIRMWARE=y # CONFIG_DRM_DP_CEC is not set -CONFIG_DRM_TTM=m CONFIG_DRM_GEM_CMA_HELPER=y CONFIG_DRM_KMS_CMA_HELPER=y -CONFIG_DRM_SCHED=m # # I2C encoder or helper chips @@ -3758,10 +3760,12 @@ CONFIG_DRM_PANEL_INNOLUX_P079ZCA=m CONFIG_DRM_PANEL_JDI_LT070ME05000=m # CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set # CONFIG_DRM_PANEL_LG_LG4573 is not set +CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO=m CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00=m CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m CONFIG_DRM_PANEL_RAYDIUM_RM68200=m +CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=m CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2=m CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m # CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set @@ -3769,6 +3773,7 @@ CONFIG_DRM_PANEL_SEIKO_43WVF1G=m CONFIG_DRM_PANEL_SHARP_LQ101R1SX01=m CONFIG_DRM_PANEL_SHARP_LS043T1LE01=m # CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m CONFIG_DRM_BRIDGE=y CONFIG_DRM_PANEL_BRIDGE=y @@ -3802,7 +3807,6 @@ CONFIG_DRM_DW_HDMI_CEC=m # CONFIG_DRM_MXSFB is not set # CONFIG_DRM_TINYDRM is not set # CONFIG_DRM_PL111 is not set -CONFIG_DRM_LIMA=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y @@ -3822,7 +3826,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y # CONFIG_FB_FOREIGN_ENDIAN is not set CONFIG_FB_SYS_FOPS=y CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_BACKLIGHT=y +CONFIG_FB_BACKLIGHT=m CONFIG_FB_MODE_HELPERS=y # CONFIG_FB_TILEBLITTING is not set @@ -3970,6 +3974,7 @@ 
CONFIG_SND_SUN50I_CODEC_ANALOG=m CONFIG_SND_SUN4I_I2S=m CONFIG_SND_SUN4I_SPDIF=m CONFIG_SND_SUN8I_ADDA_PR_REGMAP=m +CONFIG_SND_SOC_XILINX_I2S=m # CONFIG_SND_SOC_XTFPGA_I2S is not set # CONFIG_ZX_TDM is not set CONFIG_SND_SOC_I2C_AND_SPI=m @@ -3983,6 +3988,7 @@ CONFIG_SND_SOC_I2C_AND_SPI=m # CONFIG_SND_SOC_ADAU1761_SPI is not set # CONFIG_SND_SOC_ADAU7002 is not set # CONFIG_SND_SOC_AK4104 is not set +CONFIG_SND_SOC_AK4118=m CONFIG_SND_SOC_AK4458=m # CONFIG_SND_SOC_AK4554 is not set # CONFIG_SND_SOC_AK4613 is not set @@ -4594,6 +4600,7 @@ CONFIG_MMC_SUNXI=y # CONFIG_MMC_MTK is not set # CONFIG_MMC_SDHCI_XENON is not set # CONFIG_MMC_SDHCI_OMAP is not set +CONFIG_MMC_SDHCI_AM654=m # CONFIG_MEMSTICK is not set CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -4664,6 +4671,7 @@ CONFIG_LEDS_TRIGGER_CAMERA=m CONFIG_LEDS_TRIGGER_PANIC=y CONFIG_LEDS_TRIGGER_NETDEV=m CONFIG_LEDS_TRIGGER_PATTERN=m +CONFIG_LEDS_TRIGGER_AUDIO=m # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set CONFIG_EDAC_SUPPORT=y @@ -4891,7 +4899,6 @@ CONFIG_AD9834=m # # Resolver to digital converters # -# CONFIG_AD2S90 is not set # CONFIG_AD2S1210 is not set # @@ -4990,6 +4997,7 @@ CONFIG_COMMON_CLK_SI544=m # CONFIG_COMMON_CLK_XGENE is not set # CONFIG_COMMON_CLK_PWM is not set # CONFIG_COMMON_CLK_VC5 is not set +CONFIG_COMMON_CLK_BD718XX=m CONFIG_SUNXI_CCU=y CONFIG_SUN50I_A64_CCU=y CONFIG_SUN50I_H6_CCU=y @@ -5169,6 +5177,7 @@ CONFIG_STK8BA50=m # Analog to digital converters # CONFIG_AD_SIGMA_DELTA=m +CONFIG_AD7124=m CONFIG_AD7266=m CONFIG_AD7291=m CONFIG_AD7298=m @@ -5178,6 +5187,7 @@ CONFIG_AD7791=m CONFIG_AD7793=m CONFIG_AD7887=m CONFIG_AD7923=m +CONFIG_AD7949=m CONFIG_AD799X=m CONFIG_AXP20X_ADC=m CONFIG_AXP288_ADC=m @@ -5291,6 +5301,7 @@ CONFIG_AD5758=m # CONFIG_MCP4922 is not set # CONFIG_TI_DAC082S085 is not set CONFIG_TI_DAC5571=m +CONFIG_TI_DAC7311=m # CONFIG_VF610_DAC is not set # @@ -5414,6 +5425,7 @@ CONFIG_TSL2772=m CONFIG_TSL4531=m CONFIG_US5182D=m CONFIG_VCNL4000=m +CONFIG_VCNL4035=m CONFIG_VEML6070=m CONFIG_VL6180=m CONFIG_ZOPT2201=m @@ -5436,6 +5448,9 @@ CONFIG_IIO_ST_MAGN_SPI_3AXIS=m CONFIG_SENSORS_HMC5843=m CONFIG_SENSORS_HMC5843_I2C=m CONFIG_SENSORS_HMC5843_SPI=m +CONFIG_SENSORS_RM3100=m +CONFIG_SENSORS_RM3100_I2C=m +CONFIG_SENSORS_RM3100_SPI=m # # Multiplexers @@ -5466,6 +5481,7 @@ CONFIG_AD5272=m CONFIG_MCP4018=m # CONFIG_MCP4131 is not set # CONFIG_MCP4531 is not set +CONFIG_MCP41010=m # CONFIG_TPL0102 is not set # @@ -5511,6 +5527,7 @@ CONFIG_VL53L0X_I2C=m # # Resolver to digital converters # +# CONFIG_AD2S90 is not set # CONFIG_AD2S1200 is not set # @@ -5538,6 +5555,7 @@ CONFIG_ARM_GIC=y CONFIG_ARM_GIC_MAX_NR=1 CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y +CONFIG_MADERA_IRQ=m CONFIG_PARTITION_PERCPU=y # CONFIG_IPACK_BUS is not set CONFIG_ARCH_HAS_RESET_CONTROLLER=y @@ -5557,6 +5575,8 @@ CONFIG_PHY_SUN9I_USB=y CONFIG_PHY_SUN50I_USB3=y # CONFIG_BCM_KONA_USB2_PHY is not set CONFIG_PHY_CADENCE_DP=m +CONFIG_PHY_CADENCE_SIERRA=m +CONFIG_PHY_FSL_IMX8MQ_USB=m # CONFIG_PHY_PXA_28NM_HSIC is not set # CONFIG_PHY_PXA_28NM_USB2 is not set # CONFIG_PHY_CPCAP_USB is not set @@ -5994,6 +6014,8 @@ CONFIG_CRYPTO_OFB=m # CONFIG_CRYPTO_PCBC is not set CONFIG_CRYPTO_XTS=y # CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_NHPOLY1305=m +CONFIG_CRYPTO_ADIANTUM=m # # Hash modes @@ -6010,7 +6032,7 @@ CONFIG_CRYPTO_CRC32C=y CONFIG_CRYPTO_CRC32=y # CONFIG_CRYPTO_CRCT10DIF is not set CONFIG_CRYPTO_GHASH=y -# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_POLY1305=m CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_MD5=y # 
CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -6023,6 +6045,7 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=m # CONFIG_CRYPTO_SHA3 is not set # CONFIG_CRYPTO_SM3 is not set +CONFIG_CRYPTO_STREEBOG=m # CONFIG_CRYPTO_TGR192 is not set CONFIG_CRYPTO_WP512=y @@ -6075,6 +6098,7 @@ CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_USER_API_SKCIPHER=y CONFIG_CRYPTO_USER_API_RNG=y CONFIG_CRYPTO_USER_API_AEAD=y +# CONFIG_CRYPTO_STATS is not set CONFIG_CRYPTO_HASH_INFO=y # CONFIG_CRYPTO_HW is not set CONFIG_ASYMMETRIC_KEY_TYPE=y @@ -6098,6 +6122,7 @@ CONFIG_SYSTEM_TRUSTED_KEYS="" # Library routines # CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y CONFIG_BITREVERSE=y CONFIG_HAVE_ARCH_BITREVERSE=y CONFIG_RATIONAL=y @@ -6169,8 +6194,9 @@ CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y CONFIG_ARCH_HAS_DMA_MMAP_PGPROT=y -CONFIG_DMA_DIRECT_OPS=y CONFIG_SWIOTLB=y +CONFIG_DMA_REMAP=y +CONFIG_DMA_DIRECT_REMAP=y CONFIG_SGL_ALLOC=y CONFIG_CPU_RMAP=y CONFIG_DQL=y @@ -6189,7 +6215,6 @@ CONFIG_FONT_SUPPORT=y CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y CONFIG_SBITMAP=y # CONFIG_STRING_SELFTEST is not set @@ -6236,7 +6261,10 @@ CONFIG_HAVE_DEBUG_KMEMLEAK=y CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_CC_HAS_KASAN_GENERIC=y # CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 CONFIG_ARCH_HAS_KCOV=y CONFIG_CC_HAS_SANCOV_TRACE_PC=y # CONFIG_KCOV is not set diff --git a/patch/atf/atf-sunxi64/enable-additional-regulators.patch b/patch/atf/atf-sunxi64/enable-additional-regulators.patch.disabled similarity index 100% rename from patch/atf/atf-sunxi64/enable-additional-regulators.patch rename to patch/atf/atf-sunxi64/enable-additional-regulators.patch.disabled diff --git a/patch/atf/atf-sunxi64/set-rsb-to-nonsec.patch b/patch/atf/atf-sunxi64/set-rsb-to-nonsec.patch.disabled similarity index 100% rename from patch/atf/atf-sunxi64/set-rsb-to-nonsec.patch rename to patch/atf/atf-sunxi64/set-rsb-to-nonsec.patch.disabled diff --git a/patch/kernel/sunxi-dev/board-h6-add-orangepi-one-plus-and-lite2.patch b/patch/kernel/sunxi-dev/board-h6-add-orangepi-one-plus-and-lite2.patch_broken_below_5.1 similarity index 100% rename from patch/kernel/sunxi-dev/board-h6-add-orangepi-one-plus-and-lite2.patch rename to patch/kernel/sunxi-dev/board-h6-add-orangepi-one-plus-and-lite2.patch_broken_below_5.1 diff --git a/patch/kernel/sunxi-dev/patch-5.0.7-8.patch b/patch/kernel/sunxi-dev/patch-5.0.7-8.patch deleted file mode 100644 index 6680fecfb..000000000 --- a/patch/kernel/sunxi-dev/patch-5.0.7-8.patch +++ /dev/null @@ -1,5075 +0,0 @@ -diff --git a/Makefile b/Makefile -index af99c77c7066..f7666051de66 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - VERSION = 5 - PATCHLEVEL = 0 --SUBLEVEL = 7 -+SUBLEVEL = 8 - EXTRAVERSION = - NAME = Shy Crocodile - -@@ -510,7 +510,7 @@ endif - ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) - ifneq ($(CROSS_COMPILE),) - CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) --GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD))) -+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) - CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) - GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) 
- endif -diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts -index dce5be5df97b..edcff79879e7 100644 ---- a/arch/arm/boot/dts/am335x-evm.dts -+++ b/arch/arm/boot/dts/am335x-evm.dts -@@ -57,6 +57,24 @@ - enable-active-high; - }; - -+ /* TPS79501 */ -+ v1_8d_reg: fixedregulator-v1_8d { -+ compatible = "regulator-fixed"; -+ regulator-name = "v1_8d"; -+ vin-supply = <&vbat>; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ -+ /* TPS79501 */ -+ v3_3d_reg: fixedregulator-v3_3d { -+ compatible = "regulator-fixed"; -+ regulator-name = "v3_3d"; -+ vin-supply = <&vbat>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; -+ }; -+ - matrix_keypad: matrix_keypad0 { - compatible = "gpio-matrix-keypad"; - debounce-delay-ms = <5>; -@@ -499,10 +517,10 @@ - status = "okay"; - - /* Regulators */ -- AVDD-supply = <&vaux2_reg>; -- IOVDD-supply = <&vaux2_reg>; -- DRVDD-supply = <&vaux2_reg>; -- DVDD-supply = <&vbat>; -+ AVDD-supply = <&v3_3d_reg>; -+ IOVDD-supply = <&v3_3d_reg>; -+ DRVDD-supply = <&v3_3d_reg>; -+ DVDD-supply = <&v1_8d_reg>; - }; - }; - -diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts -index b128998097ce..2c2d8b5b8cf5 100644 ---- a/arch/arm/boot/dts/am335x-evmsk.dts -+++ b/arch/arm/boot/dts/am335x-evmsk.dts -@@ -73,6 +73,24 @@ - enable-active-high; - }; - -+ /* TPS79518 */ -+ v1_8d_reg: fixedregulator-v1_8d { -+ compatible = "regulator-fixed"; -+ regulator-name = "v1_8d"; -+ vin-supply = <&vbat>; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ -+ /* TPS78633 */ -+ v3_3d_reg: fixedregulator-v3_3d { -+ compatible = "regulator-fixed"; -+ regulator-name = "v3_3d"; -+ vin-supply = <&vbat>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; -+ }; -+ - leds { - pinctrl-names = "default"; - pinctrl-0 = <&user_leds_s0>; -@@ -501,10 +519,10 @@ - status = "okay"; - - /* Regulators */ -- AVDD-supply = <&vaux2_reg>; -- IOVDD-supply = <&vaux2_reg>; -- DRVDD-supply = <&vaux2_reg>; -- DVDD-supply = <&vbat>; -+ AVDD-supply = <&v3_3d_reg>; -+ IOVDD-supply = <&v3_3d_reg>; -+ DRVDD-supply = <&v3_3d_reg>; -+ DVDD-supply = <&v1_8d_reg>; - }; - }; - -diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi -index aa107ee41b8b..ef653c3209bc 100644 ---- a/arch/arm/boot/dts/rk3288-tinker.dtsi -+++ b/arch/arm/boot/dts/rk3288-tinker.dtsi -@@ -254,6 +254,7 @@ - }; - - vccio_sd: LDO_REG5 { -+ regulator-boot-on; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - regulator-name = "vccio_sd"; -@@ -430,7 +431,7 @@ - bus-width = <4>; - cap-mmc-highspeed; - cap-sd-highspeed; -- card-detect-delay = <200>; -+ broken-cd; - disable-wp; /* wp not hooked up */ - pinctrl-names = "default"; - pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; -diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi -index ca7d52daa8fb..09868dcee34b 100644 ---- a/arch/arm/boot/dts/rk3288.dtsi -+++ b/arch/arm/boot/dts/rk3288.dtsi -@@ -70,7 +70,7 @@ - compatible = "arm,cortex-a12"; - reg = <0x501>; - resets = <&cru SRST_CORE1>; -- operating-points = <&cpu_opp_table>; -+ operating-points-v2 = <&cpu_opp_table>; - #cooling-cells = <2>; /* min followed by max */ - clock-latency = <40000>; - clocks = <&cru ARMCLK>; -@@ -80,7 +80,7 @@ - compatible = "arm,cortex-a12"; - reg = <0x502>; - resets = <&cru SRST_CORE2>; -- operating-points = <&cpu_opp_table>; -+ operating-points-v2 
= <&cpu_opp_table>; - #cooling-cells = <2>; /* min followed by max */ - clock-latency = <40000>; - clocks = <&cru ARMCLK>; -@@ -90,7 +90,7 @@ - compatible = "arm,cortex-a12"; - reg = <0x503>; - resets = <&cru SRST_CORE3>; -- operating-points = <&cpu_opp_table>; -+ operating-points-v2 = <&cpu_opp_table>; - #cooling-cells = <2>; /* min followed by max */ - clock-latency = <40000>; - clocks = <&cru ARMCLK>; -diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h -index 1c01a6f843d8..28a2e45752fe 100644 ---- a/arch/arm/boot/dts/sama5d2-pinfunc.h -+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h -@@ -518,7 +518,7 @@ - #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0) - #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3) - #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1) --#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1) -+#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1) - #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2) - #define PIN_PC10 74 - #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0) -diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c -index c4c0a8ea11e4..ee410ae7369e 100644 ---- a/arch/arm/mach-omap1/board-ams-delta.c -+++ b/arch/arm/mach-omap1/board-ams-delta.c -@@ -182,6 +182,7 @@ static struct resource latch1_resources[] = { - - static struct bgpio_pdata latch1_pdata = { - .label = LATCH1_LABEL, -+ .base = -1, - .ngpio = LATCH1_NGPIO, - }; - -@@ -219,6 +220,7 @@ static struct resource latch2_resources[] = { - - static struct bgpio_pdata latch2_pdata = { - .label = LATCH2_LABEL, -+ .base = -1, - .ngpio = LATCH2_NGPIO, - }; - -diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts -index 040b36ef0dd2..520ed8e474be 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts -@@ -46,8 +46,7 @@ - - vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator { - compatible = "regulator-fixed"; -- enable-active-high; -- gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>; -+ gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>; - pinctrl-names = "default"; - pinctrl-0 = <&usb20_host_drv>; - regulator-name = "vcc_host1_5v"; -diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi -index ecd7f19c3542..97aa65455b4a 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi -@@ -1431,11 +1431,11 @@ - - sdmmc0 { - sdmmc0_clk: sdmmc0-clk { -- rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>; -+ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>; - }; - - sdmmc0_cmd: sdmmc0-cmd { -- rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>; -+ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>; - }; - - sdmmc0_dectn: sdmmc0-dectn { -@@ -1447,14 +1447,14 @@ - }; - - sdmmc0_bus1: sdmmc0-bus1 { -- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>; -+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>; - }; - - sdmmc0_bus4: sdmmc0-bus4 { -- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>, -- <1 RK_PA1 1 &pcfg_pull_up_4ma>, -- <1 RK_PA2 1 &pcfg_pull_up_4ma>, -- <1 RK_PA3 1 &pcfg_pull_up_4ma>; -+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>, -+ <1 RK_PA1 1 &pcfg_pull_up_8ma>, -+ <1 RK_PA2 1 &pcfg_pull_up_8ma>, -+ <1 RK_PA3 1 &pcfg_pull_up_8ma>; - }; - - sdmmc0_gpio: sdmmc0-gpio { -@@ -1628,50 +1628,50 @@ - rgmiim1_pins: rgmiim1-pins { - rockchip,pins = - /* mac_txclk */ -- <1 RK_PB4 2 &pcfg_pull_none_12ma>, -+ <1 RK_PB4 2 &pcfg_pull_none_8ma>, - /* mac_rxclk */ -- <1 RK_PB5 2 &pcfg_pull_none_2ma>, -+ <1 
RK_PB5 2 &pcfg_pull_none_4ma>, - /* mac_mdio */ -- <1 RK_PC3 2 &pcfg_pull_none_2ma>, -+ <1 RK_PC3 2 &pcfg_pull_none_4ma>, - /* mac_txen */ -- <1 RK_PD1 2 &pcfg_pull_none_12ma>, -+ <1 RK_PD1 2 &pcfg_pull_none_8ma>, - /* mac_clk */ -- <1 RK_PC5 2 &pcfg_pull_none_2ma>, -+ <1 RK_PC5 2 &pcfg_pull_none_4ma>, - /* mac_rxdv */ -- <1 RK_PC6 2 &pcfg_pull_none_2ma>, -+ <1 RK_PC6 2 &pcfg_pull_none_4ma>, - /* mac_mdc */ -- <1 RK_PC7 2 &pcfg_pull_none_2ma>, -+ <1 RK_PC7 2 &pcfg_pull_none_4ma>, - /* mac_rxd1 */ -- <1 RK_PB2 2 &pcfg_pull_none_2ma>, -+ <1 RK_PB2 2 &pcfg_pull_none_4ma>, - /* mac_rxd0 */ -- <1 RK_PB3 2 &pcfg_pull_none_2ma>, -+ <1 RK_PB3 2 &pcfg_pull_none_4ma>, - /* mac_txd1 */ -- <1 RK_PB0 2 &pcfg_pull_none_12ma>, -+ <1 RK_PB0 2 &pcfg_pull_none_8ma>, - /* mac_txd0 */ -- <1 RK_PB1 2 &pcfg_pull_none_12ma>, -+ <1 RK_PB1 2 &pcfg_pull_none_8ma>, - /* mac_rxd3 */ -- <1 RK_PB6 2 &pcfg_pull_none_2ma>, -+ <1 RK_PB6 2 &pcfg_pull_none_4ma>, - /* mac_rxd2 */ -- <1 RK_PB7 2 &pcfg_pull_none_2ma>, -+ <1 RK_PB7 2 &pcfg_pull_none_4ma>, - /* mac_txd3 */ -- <1 RK_PC0 2 &pcfg_pull_none_12ma>, -+ <1 RK_PC0 2 &pcfg_pull_none_8ma>, - /* mac_txd2 */ -- <1 RK_PC1 2 &pcfg_pull_none_12ma>, -+ <1 RK_PC1 2 &pcfg_pull_none_8ma>, - - /* mac_txclk */ -- <0 RK_PB0 1 &pcfg_pull_none>, -+ <0 RK_PB0 1 &pcfg_pull_none_8ma>, - /* mac_txen */ -- <0 RK_PB4 1 &pcfg_pull_none>, -+ <0 RK_PB4 1 &pcfg_pull_none_8ma>, - /* mac_clk */ -- <0 RK_PD0 1 &pcfg_pull_none>, -+ <0 RK_PD0 1 &pcfg_pull_none_4ma>, - /* mac_txd1 */ -- <0 RK_PC0 1 &pcfg_pull_none>, -+ <0 RK_PC0 1 &pcfg_pull_none_8ma>, - /* mac_txd0 */ -- <0 RK_PC1 1 &pcfg_pull_none>, -+ <0 RK_PC1 1 &pcfg_pull_none_8ma>, - /* mac_txd3 */ -- <0 RK_PC7 1 &pcfg_pull_none>, -+ <0 RK_PC7 1 &pcfg_pull_none_8ma>, - /* mac_txd2 */ -- <0 RK_PC6 1 &pcfg_pull_none>; -+ <0 RK_PC6 1 &pcfg_pull_none_8ma>; - }; - - rmiim1_pins: rmiim1-pins { -diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h -index cccb83ad7fa8..e1d95f08f8e1 100644 ---- a/arch/arm64/include/asm/futex.h -+++ b/arch/arm64/include/asm/futex.h -@@ -30,8 +30,8 @@ do { \ - " prfm pstl1strm, %2\n" \ - "1: ldxr %w1, %2\n" \ - insn "\n" \ --"2: stlxr %w3, %w0, %2\n" \ --" cbnz %w3, 1b\n" \ -+"2: stlxr %w0, %w3, %2\n" \ -+" cbnz %w0, 1b\n" \ - " dmb ish\n" \ - "3:\n" \ - " .pushsection .fixup,\"ax\"\n" \ -@@ -50,30 +50,30 @@ do { \ - static inline int - arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) - { -- int oldval = 0, ret, tmp; -+ int oldval, ret, tmp; - u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); - - pagefault_disable(); - - switch (op) { - case FUTEX_OP_SET: -- __futex_atomic_op("mov %w0, %w4", -+ __futex_atomic_op("mov %w3, %w4", - ret, oldval, uaddr, tmp, oparg); - break; - case FUTEX_OP_ADD: -- __futex_atomic_op("add %w0, %w1, %w4", -+ __futex_atomic_op("add %w3, %w1, %w4", - ret, oldval, uaddr, tmp, oparg); - break; - case FUTEX_OP_OR: -- __futex_atomic_op("orr %w0, %w1, %w4", -+ __futex_atomic_op("orr %w3, %w1, %w4", - ret, oldval, uaddr, tmp, oparg); - break; - case FUTEX_OP_ANDN: -- __futex_atomic_op("and %w0, %w1, %w4", -+ __futex_atomic_op("and %w3, %w1, %w4", - ret, oldval, uaddr, tmp, ~oparg); - break; - case FUTEX_OP_XOR: -- __futex_atomic_op("eor %w0, %w1, %w4", -+ __futex_atomic_op("eor %w3, %w1, %w4", - ret, oldval, uaddr, tmp, oparg); - break; - default: -diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h -index 905e1bb0e7bd..cd9f4e9d04d3 100644 ---- a/arch/arm64/include/asm/module.h -+++ b/arch/arm64/include/asm/module.h -@@ -73,4 
+73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place) - struct plt_entry get_plt_entry(u64 dst, void *pc); - bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b); - -+static inline bool plt_entry_is_initialized(const struct plt_entry *e) -+{ -+ return e->adrp || e->add || e->br; -+} -+ - #endif /* __ASM_MODULE_H */ -diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c -index 8e4431a8821f..07b298120182 100644 ---- a/arch/arm64/kernel/ftrace.c -+++ b/arch/arm64/kernel/ftrace.c -@@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) - trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); - if (!plt_entries_equal(mod->arch.ftrace_trampoline, - &trampoline)) { -- if (!plt_entries_equal(mod->arch.ftrace_trampoline, -- &(struct plt_entry){})) { -+ if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { - pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); - return -EINVAL; - } -diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c -index 4e2fb877f8d5..92bfeb3e8d7c 100644 ---- a/arch/arm64/kernel/traps.c -+++ b/arch/arm64/kernel/traps.c -@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) - void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) - { - struct stackframe frame; -- int skip; -+ int skip = 0; - - pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); - -+ if (regs) { -+ if (user_mode(regs)) -+ return; -+ skip = 1; -+ } -+ - if (!tsk) - tsk = current; - -@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) - frame.graph = 0; - #endif - -- skip = !!regs; - printk("Call trace:\n"); - do { - /* skip until specified stack frame */ -@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs) - return ret; - - print_modules(); -- __show_regs(regs); - pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), - end_of_stack(tsk)); -+ show_regs(regs); - -- if (!user_mode(regs)) { -- dump_backtrace(regs, tsk); -+ if (!user_mode(regs)) - dump_instr(KERN_EMERG, regs); -- } - - return ret; - } -diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h -index d637445737b7..9a9cd81e66c1 100644 ---- a/arch/csky/include/asm/syscall.h -+++ b/arch/csky/include/asm/syscall.h -@@ -49,10 +49,11 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, - if (i == 0) { - args[0] = regs->orig_a0; - args++; -- i++; - n--; -+ } else { -+ i--; - } -- memcpy(args, ®s->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); -+ memcpy(args, ®s->a1 + i, n * sizeof(args[0])); - } - - static inline void -@@ -63,10 +64,11 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, - if (i == 0) { - regs->orig_a0 = args[0]; - args++; -- i++; - n--; -+ } else { -+ i--; - } -- memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); -+ memcpy(®s->a1 + i, args, n * sizeof(regs->a1)); - } - - static inline int -diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h -index 2a27b275ab09..9ff033d261ab 100644 ---- a/arch/parisc/include/asm/ptrace.h -+++ b/arch/parisc/include/asm/ptrace.h -@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *); - - static inline unsigned long regs_return_value(struct pt_regs *regs) - { -- return regs->gr[20]; -+ return regs->gr[28]; - } - - static inline void instruction_pointer_set(struct pt_regs *regs, - 
unsigned long val) - { -- regs->iaoq[0] = val; -+ regs->iaoq[0] = val; -+ regs->iaoq[1] = val + 4; - } - - /* Query offset/name of register from its name/offset */ -diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c -index eb39e7e380d7..841db71958cd 100644 ---- a/arch/parisc/kernel/process.c -+++ b/arch/parisc/kernel/process.c -@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void) - - static int __init parisc_idle_init(void) - { -- const char *marker; -- -- /* check QEMU/SeaBIOS marker in PAGE0 */ -- marker = (char *) &PAGE0->pad0; -- running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); -- - if (!running_on_qemu) - cpu_idle_poll_ctrl(1); - -diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c -index f2cf86ac279b..25946624ce6a 100644 ---- a/arch/parisc/kernel/setup.c -+++ b/arch/parisc/kernel/setup.c -@@ -396,6 +396,9 @@ void __init start_parisc(void) - int ret, cpunum; - struct pdc_coproc_cfg coproc_cfg; - -+ /* check QEMU/SeaBIOS marker in PAGE0 */ -+ running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0); -+ - cpunum = smp_processor_id(); - - init_cpu_topology(); -diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S -index 9e253ce27e08..4fee6c9887db 100644 ---- a/arch/powerpc/kernel/exceptions-64s.S -+++ b/arch/powerpc/kernel/exceptions-64s.S -@@ -612,11 +612,17 @@ EXC_COMMON_BEGIN(data_access_slb_common) - ld r4,PACA_EXSLB+EX_DAR(r13) - std r4,_DAR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD -+BEGIN_MMU_FTR_SECTION -+ /* HPT case, do SLB fault */ - bl do_slb_fault - cmpdi r3,0 - bne- 1f - b fast_exception_return - 1: /* Error case */ -+MMU_FTR_SECTION_ELSE -+ /* Radix case, access is outside page table range */ -+ li r3,-EFAULT -+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) - std r3,RESULT(r1) - bl save_nvgprs - RECONCILE_IRQ_STATE(r10, r11) -@@ -661,11 +667,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common) - EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB) - ld r4,_NIP(r1) - addi r3,r1,STACK_FRAME_OVERHEAD -+BEGIN_MMU_FTR_SECTION -+ /* HPT case, do SLB fault */ - bl do_slb_fault - cmpdi r3,0 - bne- 1f - b fast_exception_return - 1: /* Error case */ -+MMU_FTR_SECTION_ELSE -+ /* Radix case, access is outside page table range */ -+ li r3,-EFAULT -+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) - std r3,RESULT(r1) - bl save_nvgprs - RECONCILE_IRQ_STATE(r10, r11) -diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h -index bba3da6ef157..6ea9e1804233 100644 ---- a/arch/riscv/include/asm/syscall.h -+++ b/arch/riscv/include/asm/syscall.h -@@ -79,10 +79,11 @@ static inline void syscall_get_arguments(struct task_struct *task, - if (i == 0) { - args[0] = regs->orig_a0; - args++; -- i++; - n--; -+ } else { -+ i--; - } -- memcpy(args, ®s->a1 + i * sizeof(regs->a1), n * sizeof(args[0])); -+ memcpy(args, ®s->a1 + i, n * sizeof(args[0])); - } - - static inline void syscall_set_arguments(struct task_struct *task, -@@ -94,10 +95,11 @@ static inline void syscall_set_arguments(struct task_struct *task, - if (i == 0) { - regs->orig_a0 = args[0]; - args++; -- i++; - n--; -- } -- memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); -+ } else { -+ i--; -+ } -+ memcpy(®s->a1 + i, args, n * sizeof(regs->a1)); - } - - static inline int syscall_get_arch(void) -diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c -index 7d2d7c801dba..0ecfac84ba91 100644 ---- a/arch/x86/events/amd/core.c -+++ b/arch/x86/events/amd/core.c -@@ -3,10 +3,14 @@ - #include - #include - #include 
-+#include - #include -+#include - - #include "../perf_event.h" - -+static DEFINE_PER_CPU(unsigned int, perf_nmi_counter); -+ - static __initconst const u64 amd_hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] -@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu) - } - } - -+/* -+ * When a PMC counter overflows, an NMI is used to process the event and -+ * reset the counter. NMI latency can result in the counter being updated -+ * before the NMI can run, which can result in what appear to be spurious -+ * NMIs. This function is intended to wait for the NMI to run and reset -+ * the counter to avoid possible unhandled NMI messages. -+ */ -+#define OVERFLOW_WAIT_COUNT 50 -+ -+static void amd_pmu_wait_on_overflow(int idx) -+{ -+ unsigned int i; -+ u64 counter; -+ -+ /* -+ * Wait for the counter to be reset if it has overflowed. This loop -+ * should exit very, very quickly, but just in case, don't wait -+ * forever... -+ */ -+ for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) { -+ rdmsrl(x86_pmu_event_addr(idx), counter); -+ if (counter & (1ULL << (x86_pmu.cntval_bits - 1))) -+ break; -+ -+ /* Might be in IRQ context, so can't sleep */ -+ udelay(1); -+ } -+} -+ -+static void amd_pmu_disable_all(void) -+{ -+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); -+ int idx; -+ -+ x86_pmu_disable_all(); -+ -+ /* -+ * This shouldn't be called from NMI context, but add a safeguard here -+ * to return, since if we're in NMI context we can't wait for an NMI -+ * to reset an overflowed counter value. -+ */ -+ if (in_nmi()) -+ return; -+ -+ /* -+ * Check each counter for overflow and wait for it to be reset by the -+ * NMI if it has overflowed. This relies on the fact that all active -+ * counters are always enabled when this function is caled and -+ * ARCH_PERFMON_EVENTSEL_INT is always set. -+ */ -+ for (idx = 0; idx < x86_pmu.num_counters; idx++) { -+ if (!test_bit(idx, cpuc->active_mask)) -+ continue; -+ -+ amd_pmu_wait_on_overflow(idx); -+ } -+} -+ -+static void amd_pmu_disable_event(struct perf_event *event) -+{ -+ x86_pmu_disable_event(event); -+ -+ /* -+ * This can be called from NMI context (via x86_pmu_stop). The counter -+ * may have overflowed, but either way, we'll never see it get reset -+ * by the NMI if we're already in the NMI. And the NMI latency support -+ * below will take care of any pending NMI that might have been -+ * generated by the overflow. -+ */ -+ if (in_nmi()) -+ return; -+ -+ amd_pmu_wait_on_overflow(event->hw.idx); -+} -+ -+/* -+ * Because of NMI latency, if multiple PMC counters are active or other sources -+ * of NMIs are received, the perf NMI handler can handle one or more overflowed -+ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI -+ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel -+ * back-to-back NMI support won't be active. This PMC handler needs to take into -+ * account that this can occur, otherwise this could result in unknown NMI -+ * messages being issued. Examples of this is PMC overflow while in the NMI -+ * handler when multiple PMCs are active or PMC overflow while handling some -+ * other source of an NMI. -+ * -+ * Attempt to mitigate this by using the number of active PMCs to determine -+ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset -+ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the -+ * number of active PMCs or 2. 
The value of 2 is used in case an NMI does not -+ * arrive at the LAPIC in time to be collapsed into an already pending NMI. -+ */ -+static int amd_pmu_handle_irq(struct pt_regs *regs) -+{ -+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); -+ int active, handled; -+ -+ /* -+ * Obtain the active count before calling x86_pmu_handle_irq() since -+ * it is possible that x86_pmu_handle_irq() may make a counter -+ * inactive (through x86_pmu_stop). -+ */ -+ active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX); -+ -+ /* Process any counter overflows */ -+ handled = x86_pmu_handle_irq(regs); -+ -+ /* -+ * If a counter was handled, record the number of possible remaining -+ * NMIs that can occur. -+ */ -+ if (handled) { -+ this_cpu_write(perf_nmi_counter, -+ min_t(unsigned int, 2, active)); -+ -+ return handled; -+ } -+ -+ if (!this_cpu_read(perf_nmi_counter)) -+ return NMI_DONE; -+ -+ this_cpu_dec(perf_nmi_counter); -+ -+ return NMI_HANDLED; -+} -+ - static struct event_constraint * - amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, - struct perf_event *event) -@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config) - - static __initconst const struct x86_pmu amd_pmu = { - .name = "AMD", -- .handle_irq = x86_pmu_handle_irq, -- .disable_all = x86_pmu_disable_all, -+ .handle_irq = amd_pmu_handle_irq, -+ .disable_all = amd_pmu_disable_all, - .enable_all = x86_pmu_enable_all, - .enable = x86_pmu_enable_event, -- .disable = x86_pmu_disable_event, -+ .disable = amd_pmu_disable_event, - .hw_config = amd_pmu_hw_config, - .schedule_events = x86_schedule_events, - .eventsel = MSR_K7_EVNTSEL0, -@@ -732,7 +862,7 @@ void amd_pmu_enable_virt(void) - cpuc->perf_ctr_virt_mask = 0; - - /* Reload all events */ -- x86_pmu_disable_all(); -+ amd_pmu_disable_all(); - x86_pmu_enable_all(0); - } - EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); -@@ -750,7 +880,7 @@ void amd_pmu_disable_virt(void) - cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; - - /* Reload all events */ -- x86_pmu_disable_all(); -+ amd_pmu_disable_all(); - x86_pmu_enable_all(0); - } - EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); -diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c -index e2b1447192a8..81911e11a15d 100644 ---- a/arch/x86/events/core.c -+++ b/arch/x86/events/core.c -@@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags) - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - -- if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { -+ if (test_bit(hwc->idx, cpuc->active_mask)) { - x86_pmu.disable(event); -+ __clear_bit(hwc->idx, cpuc->active_mask); - cpuc->events[hwc->idx] = NULL; - WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); - hwc->state |= PERF_HES_STOPPED; -@@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs) - apic_write(APIC_LVTPC, APIC_DM_NMI); - - for (idx = 0; idx < x86_pmu.num_counters; idx++) { -- if (!test_bit(idx, cpuc->active_mask)) { -- /* -- * Though we deactivated the counter some cpus -- * might still deliver spurious interrupts still -- * in flight. 
Catch them: -- */ -- if (__test_and_clear_bit(idx, cpuc->running)) -- handled++; -+ if (!test_bit(idx, cpuc->active_mask)) - continue; -- } - - event = cpuc->events[idx]; - -diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h -index ad7b210aa3f6..8e790ec219a5 100644 ---- a/arch/x86/include/asm/bitops.h -+++ b/arch/x86/include/asm/bitops.h -@@ -36,22 +36,17 @@ - * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). - */ - --#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) --/* Technically wrong, but this avoids compilation errors on some gcc -- versions. */ --#define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) --#else --#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) --#endif -+#define RLONG_ADDR(x) "m" (*(volatile long *) (x)) -+#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x)) - --#define ADDR BITOP_ADDR(addr) -+#define ADDR RLONG_ADDR(addr) - - /* - * We do the locked ops that don't return the old value as - * a mask operation on a byte. - */ - #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) --#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) -+#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3)) - #define CONST_MASK(nr) (1 << ((nr) & 7)) - - /** -@@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr) - : "memory"); - } else { - asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" -- : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); -+ : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); - } - } - -@@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr) - */ - static __always_inline void __set_bit(long nr, volatile unsigned long *addr) - { -- asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); -+ asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); - } - - /** -@@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr) - : "iq" ((u8)~CONST_MASK(nr))); - } else { - asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" -- : BITOP_ADDR(addr) -- : "Ir" (nr)); -+ : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); - } - } - -@@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad - - static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) - { -- asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); -+ asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); - } - - static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) -@@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile - bool negative; - asm volatile(LOCK_PREFIX "andb %2,%1" - CC_SET(s) -- : CC_OUT(s) (negative), ADDR -+ : CC_OUT(s) (negative), WBYTE_ADDR(addr) - : "ir" ((char) ~(1 << nr)) : "memory"); - return negative; - } -@@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile - * __clear_bit() is non-atomic and implies release semantics before the memory - * operation. It can be used for an unlock if no other CPUs can concurrently - * modify other bits in the word. -- * -- * No memory barrier is required here, because x86 cannot reorder stores past -- * older loads. Same principle as spin_unlock. 
- */ - static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) - { -- barrier(); - __clear_bit(nr, addr); - } - -@@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * - */ - static __always_inline void __change_bit(long nr, volatile unsigned long *addr) - { -- asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); -+ asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); - } - - /** -@@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) - : "iq" ((u8)CONST_MASK(nr))); - } else { - asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" -- : BITOP_ADDR(addr) -- : "Ir" (nr)); -+ : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); - } - } - -@@ -248,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * - - asm(__ASM_SIZE(bts) " %2,%1" - CC_SET(c) -- : CC_OUT(c) (oldbit), ADDR -- : "Ir" (nr)); -+ : CC_OUT(c) (oldbit) -+ : ADDR, "Ir" (nr) : "memory"); - return oldbit; - } - -@@ -288,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long - - asm volatile(__ASM_SIZE(btr) " %2,%1" - CC_SET(c) -- : CC_OUT(c) (oldbit), ADDR -- : "Ir" (nr)); -+ : CC_OUT(c) (oldbit) -+ : ADDR, "Ir" (nr) : "memory"); - return oldbit; - } - -@@ -300,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon - - asm volatile(__ASM_SIZE(btc) " %2,%1" - CC_SET(c) -- : CC_OUT(c) (oldbit), ADDR -- : "Ir" (nr) : "memory"); -+ : CC_OUT(c) (oldbit) -+ : ADDR, "Ir" (nr) : "memory"); - - return oldbit; - } -@@ -332,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l - asm volatile(__ASM_SIZE(bt) " %2,%1" - CC_SET(c) - : CC_OUT(c) (oldbit) -- : "m" (*(unsigned long *)addr), "Ir" (nr)); -+ : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); - - return oldbit; - } -diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h -index 55d392c6bd29..2fd165f1cffa 100644 ---- a/arch/x86/include/asm/string_32.h -+++ b/arch/x86/include/asm/string_32.h -@@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) - * No 3D Now! - */ - --#if (__GNUC__ >= 4) - #define memcpy(t, f, n) __builtin_memcpy(t, f, n) --#else --#define memcpy(t, f, n) \ -- (__builtin_constant_p((n)) \ -- ? __constant_memcpy((t), (f), (n)) \ -- : __memcpy((t), (f), (n))) --#endif - - #endif - #endif /* !CONFIG_FORTIFY_SOURCE */ -@@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, - - { - int d0, d1; --#if __GNUC__ == 4 && __GNUC_MINOR__ == 0 -- /* Workaround for broken gcc 4.0 */ -- register unsigned long eax asm("%eax") = pattern; --#else - unsigned long eax = pattern; --#endif - - switch (count % 4) { - case 0: -@@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, - #define __HAVE_ARCH_MEMSET - extern void *memset(void *, int, size_t); - #ifndef CONFIG_FORTIFY_SOURCE --#if (__GNUC__ >= 4) - #define memset(s, c, count) __builtin_memset(s, c, count) --#else --#define memset(s, c, count) \ -- (__builtin_constant_p(c) \ -- ? 
__constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ -- (count)) \ -- : __memset((s), (c), (count))) --#endif - #endif /* !CONFIG_FORTIFY_SOURCE */ - - #define __HAVE_ARCH_MEMSET16 -diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h -index 4e4194e21a09..75314c3dbe47 100644 ---- a/arch/x86/include/asm/string_64.h -+++ b/arch/x86/include/asm/string_64.h -@@ -14,21 +14,6 @@ - extern void *memcpy(void *to, const void *from, size_t len); - extern void *__memcpy(void *to, const void *from, size_t len); - --#ifndef CONFIG_FORTIFY_SOURCE --#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 --#define memcpy(dst, src, len) \ --({ \ -- size_t __len = (len); \ -- void *__ret; \ -- if (__builtin_constant_p(len) && __len >= 64) \ -- __ret = __memcpy((dst), (src), __len); \ -- else \ -- __ret = __builtin_memcpy((dst), (src), __len); \ -- __ret; \ --}) --#endif --#endif /* !CONFIG_FORTIFY_SOURCE */ -- - #define __HAVE_ARCH_MEMSET - void *memset(void *s, int c, size_t n); - void *__memset(void *s, int c, size_t n); -diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h -index ef05bea7010d..6b5c710846f5 100644 ---- a/arch/x86/include/asm/xen/hypercall.h -+++ b/arch/x86/include/asm/xen/hypercall.h -@@ -206,6 +206,9 @@ xen_single_call(unsigned int call, - __HYPERCALL_DECLS; - __HYPERCALL_5ARG(a1, a2, a3, a4, a5); - -+ if (call >= PAGE_SIZE / sizeof(hypercall_page[0])) -+ return -EINVAL; -+ - asm volatile(CALL_NOSPEC - : __HYPERCALL_5PARAM - : [thunk_target] "a" (&hypercall_page[call]) -diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c -index f13a3a24d360..a9b8e38d78ad 100644 ---- a/arch/x86/kvm/svm.c -+++ b/arch/x86/kvm/svm.c -@@ -6422,11 +6422,11 @@ e_free: - return ret; - } - --static int get_num_contig_pages(int idx, struct page **inpages, -- unsigned long npages) -+static unsigned long get_num_contig_pages(unsigned long idx, -+ struct page **inpages, unsigned long npages) - { - unsigned long paddr, next_paddr; -- int i = idx + 1, pages = 1; -+ unsigned long i = idx + 1, pages = 1; - - /* find the number of contiguous pages starting from idx */ - paddr = __sme_page_pa(inpages[idx]); -@@ -6445,12 +6445,12 @@ static int get_num_contig_pages(int idx, struct page **inpages, - - static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) - { -- unsigned long vaddr, vaddr_end, next_vaddr, npages, size; -+ unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct kvm_sev_launch_update_data params; - struct sev_data_launch_update_data *data; - struct page **inpages; -- int i, ret, pages; -+ int ret; - - if (!sev_guest(kvm)) - return -ENOTTY; -diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c -index f014e1aeee96..f90b3a948291 100644 ---- a/arch/x86/kvm/vmx/nested.c -+++ b/arch/x86/kvm/vmx/nested.c -@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, - } - } - -+static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) { -+ int msr; -+ -+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { -+ unsigned word = msr / BITS_PER_LONG; -+ -+ msr_bitmap[word] = ~0; -+ msr_bitmap[word + (0x800 / sizeof(long))] = ~0; -+ } -+} -+ - /* - * Merge L0's and L1's MSR bitmap, return false to indicate that - * we do not use the hardware. 
-@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, - return false; - - msr_bitmap_l1 = (unsigned long *)kmap(page); -- if (nested_cpu_has_apic_reg_virt(vmcs12)) { -- /* -- * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it -- * just lets the processor take the value from the virtual-APIC page; -- * take those 256 bits directly from the L1 bitmap. -- */ -- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { -- unsigned word = msr / BITS_PER_LONG; -- msr_bitmap_l0[word] = msr_bitmap_l1[word]; -- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; -- } -- } else { -- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { -- unsigned word = msr / BITS_PER_LONG; -- msr_bitmap_l0[word] = ~0; -- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; -- } -- } - -- nested_vmx_disable_intercept_for_msr( -- msr_bitmap_l1, msr_bitmap_l0, -- X2APIC_MSR(APIC_TASKPRI), -- MSR_TYPE_W); -+ /* -+ * To keep the control flow simple, pay eight 8-byte writes (sixteen -+ * 4-byte writes on 32-bit systems) up front to enable intercepts for -+ * the x2APIC MSR range and selectively disable them below. -+ */ -+ enable_x2apic_msr_intercepts(msr_bitmap_l0); -+ -+ if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { -+ if (nested_cpu_has_apic_reg_virt(vmcs12)) { -+ /* -+ * L0 need not intercept reads for MSRs between 0x800 -+ * and 0x8ff, it just lets the processor take the value -+ * from the virtual-APIC page; take those 256 bits -+ * directly from the L1 bitmap. -+ */ -+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { -+ unsigned word = msr / BITS_PER_LONG; -+ -+ msr_bitmap_l0[word] = msr_bitmap_l1[word]; -+ } -+ } - -- if (nested_cpu_has_vid(vmcs12)) { -- nested_vmx_disable_intercept_for_msr( -- msr_bitmap_l1, msr_bitmap_l0, -- X2APIC_MSR(APIC_EOI), -- MSR_TYPE_W); - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, -- X2APIC_MSR(APIC_SELF_IPI), -- MSR_TYPE_W); -+ X2APIC_MSR(APIC_TASKPRI), -+ MSR_TYPE_R | MSR_TYPE_W); -+ -+ if (nested_cpu_has_vid(vmcs12)) { -+ nested_vmx_disable_intercept_for_msr( -+ msr_bitmap_l1, msr_bitmap_l0, -+ X2APIC_MSR(APIC_EOI), -+ MSR_TYPE_W); -+ nested_vmx_disable_intercept_for_msr( -+ msr_bitmap_l1, msr_bitmap_l0, -+ X2APIC_MSR(APIC_SELF_IPI), -+ MSR_TYPE_W); -+ } - } - - if (spec_ctrl) -diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c -index 174c11f13bba..b9f82510c650 100644 ---- a/arch/xtensa/kernel/stacktrace.c -+++ b/arch/xtensa/kernel/stacktrace.c -@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data) - return 1; - } - -+/* -+ * level == 0 is for the return address from the caller of this function, -+ * not from this function itself. 
-+ */ - unsigned long return_address(unsigned level) - { - struct return_addr_data r = { -- .skip = level + 1, -+ .skip = level, - }; - walk_stackframe(stack_pointer(NULL), return_address_cb, &r); - return r.addr; -diff --git a/block/bio.c b/block/bio.c -index 4db1008309ed..a06f58bd4c72 100644 ---- a/block/bio.c -+++ b/block/bio.c -@@ -1238,8 +1238,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q, - } - } - -- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) -+ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) { -+ if (!map_data) -+ __free_page(page); - break; -+ } - - len -= bytes; - offset = 0; -diff --git a/block/blk-core.c b/block/blk-core.c -index 6b78ec56a4f2..5bde73a49399 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -1246,8 +1246,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q, - */ - blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) - { -- blk_qc_t unused; -- - if (blk_cloned_rq_check_limits(q, rq)) - return BLK_STS_IOERR; - -@@ -1263,7 +1261,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * - * bypass a potential scheduler on the bottom device for - * insert. - */ -- return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true); -+ return blk_mq_request_issue_directly(rq, true); - } - EXPORT_SYMBOL_GPL(blk_insert_cloned_request); - -diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c -index 140933e4a7d1..0c98b6c1ca49 100644 ---- a/block/blk-mq-sched.c -+++ b/block/blk-mq-sched.c -@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, - * busy in case of 'none' scheduler, and this way may save - * us one extra enqueue & dequeue to sw queue. - */ -- if (!hctx->dispatch_busy && !e && !run_queue_async) -+ if (!hctx->dispatch_busy && !e && !run_queue_async) { - blk_mq_try_issue_list_directly(hctx, list); -- else -- blk_mq_insert_requests(hctx, ctx, list); -+ if (list_empty(list)) -+ return; -+ } -+ blk_mq_insert_requests(hctx, ctx, list); - } - - blk_mq_run_hw_queue(hctx, run_queue_async); -diff --git a/block/blk-mq.c b/block/blk-mq.c -index b9283b63d116..16f9675c57e6 100644 ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -1805,74 +1805,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, - return ret; - } - --blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, -+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, - struct request *rq, - blk_qc_t *cookie, -- bool bypass, bool last) -+ bool bypass_insert, bool last) - { - struct request_queue *q = rq->q; - bool run_queue = true; -- blk_status_t ret = BLK_STS_RESOURCE; -- int srcu_idx; -- bool force = false; - -- hctx_lock(hctx, &srcu_idx); - /* -- * hctx_lock is needed before checking quiesced flag. -+ * RCU or SRCU read lock is needed before checking quiesced flag. - * -- * When queue is stopped or quiesced, ignore 'bypass', insert -- * and return BLK_STS_OK to caller, and avoid driver to try to -- * dispatch again. -+ * When queue is stopped or quiesced, ignore 'bypass_insert' from -+ * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, -+ * and avoid driver to try to dispatch again. 
- */ -- if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) { -+ if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { - run_queue = false; -- bypass = false; -- goto out_unlock; -+ bypass_insert = false; -+ goto insert; - } - -- if (unlikely(q->elevator && !bypass)) -- goto out_unlock; -+ if (q->elevator && !bypass_insert) -+ goto insert; - - if (!blk_mq_get_dispatch_budget(hctx)) -- goto out_unlock; -+ goto insert; - - if (!blk_mq_get_driver_tag(rq)) { - blk_mq_put_dispatch_budget(hctx); -- goto out_unlock; -+ goto insert; - } - -- /* -- * Always add a request that has been through -- *.queue_rq() to the hardware dispatch list. -- */ -- force = true; -- ret = __blk_mq_issue_directly(hctx, rq, cookie, last); --out_unlock: -+ return __blk_mq_issue_directly(hctx, rq, cookie, last); -+insert: -+ if (bypass_insert) -+ return BLK_STS_RESOURCE; -+ -+ blk_mq_request_bypass_insert(rq, run_queue); -+ return BLK_STS_OK; -+} -+ -+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, -+ struct request *rq, blk_qc_t *cookie) -+{ -+ blk_status_t ret; -+ int srcu_idx; -+ -+ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); -+ -+ hctx_lock(hctx, &srcu_idx); -+ -+ ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); -+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) -+ blk_mq_request_bypass_insert(rq, true); -+ else if (ret != BLK_STS_OK) -+ blk_mq_end_request(rq, ret); -+ -+ hctx_unlock(hctx, srcu_idx); -+} -+ -+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) -+{ -+ blk_status_t ret; -+ int srcu_idx; -+ blk_qc_t unused_cookie; -+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx; -+ -+ hctx_lock(hctx, &srcu_idx); -+ ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); - hctx_unlock(hctx, srcu_idx); -- switch (ret) { -- case BLK_STS_OK: -- break; -- case BLK_STS_DEV_RESOURCE: -- case BLK_STS_RESOURCE: -- if (force) { -- blk_mq_request_bypass_insert(rq, run_queue); -- /* -- * We have to return BLK_STS_OK for the DM -- * to avoid livelock. Otherwise, we return -- * the real result to indicate whether the -- * request is direct-issued successfully. -- */ -- ret = bypass ? BLK_STS_OK : ret; -- } else if (!bypass) { -- blk_mq_sched_insert_request(rq, false, -- run_queue, false); -- } -- break; -- default: -- if (!bypass) -- blk_mq_end_request(rq, ret); -- break; -- } - - return ret; - } -@@ -1880,20 +1882,22 @@ out_unlock: - void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, - struct list_head *list) - { -- blk_qc_t unused; -- blk_status_t ret = BLK_STS_OK; -- - while (!list_empty(list)) { -+ blk_status_t ret; - struct request *rq = list_first_entry(list, struct request, - queuelist); - - list_del_init(&rq->queuelist); -- if (ret == BLK_STS_OK) -- ret = blk_mq_try_issue_directly(hctx, rq, &unused, -- false, -+ ret = blk_mq_request_issue_directly(rq, list_empty(list)); -+ if (ret != BLK_STS_OK) { -+ if (ret == BLK_STS_RESOURCE || -+ ret == BLK_STS_DEV_RESOURCE) { -+ blk_mq_request_bypass_insert(rq, - list_empty(list)); -- else -- blk_mq_sched_insert_request(rq, false, true, false); -+ break; -+ } -+ blk_mq_end_request(rq, ret); -+ } - } - - /* -@@ -1901,7 +1905,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, - * the driver there was more coming, but that turned out to - * be a lie. 
- */ -- if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs) -+ if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs) - hctx->queue->mq_ops->commit_rqs(hctx); - } - -@@ -2014,13 +2018,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) - if (same_queue_rq) { - data.hctx = same_queue_rq->mq_hctx; - blk_mq_try_issue_directly(data.hctx, same_queue_rq, -- &cookie, false, true); -+ &cookie); - } - } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && - !data.hctx->dispatch_busy)) { - blk_mq_put_ctx(data.ctx); - blk_mq_bio_to_request(rq, bio); -- blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true); -+ blk_mq_try_issue_directly(data.hctx, rq, &cookie); - } else { - blk_mq_put_ctx(data.ctx); - blk_mq_bio_to_request(rq, bio); -diff --git a/block/blk-mq.h b/block/blk-mq.h -index d0b3dd54ef8d..a3a684a8c633 100644 ---- a/block/blk-mq.h -+++ b/block/blk-mq.h -@@ -67,10 +67,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue); - void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, - struct list_head *list); - --blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, -- struct request *rq, -- blk_qc_t *cookie, -- bool bypass, bool last); -+/* Used by blk_insert_cloned_request() to issue request directly */ -+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last); - void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, - struct list_head *list); - -diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c -index e10fec99a182..4424997ecf30 100644 ---- a/drivers/acpi/acpica/evgpe.c -+++ b/drivers/acpi/acpica/evgpe.c -@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) - - ACPI_FUNCTION_TRACE(ev_enable_gpe); - -- /* Enable the requested GPE */ -+ /* Clear the GPE status */ -+ status = acpi_hw_clear_gpe(gpe_event_info); -+ if (ACPI_FAILURE(status)) -+ return_ACPI_STATUS(status); - -+ /* Enable the requested GPE */ - status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); - return_ACPI_STATUS(status); - } -diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c -index 8638f43cfc3d..79d86da1c892 100644 ---- a/drivers/acpi/acpica/nsobject.c -+++ b/drivers/acpi/acpica/nsobject.c -@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node) - } - } - -+ if (obj_desc->common.type == ACPI_TYPE_REGION) { -+ acpi_ut_remove_address_range(obj_desc->region.space_id, node); -+ } -+ - /* Clear the Node entry in all cases */ - - node->object = NULL; -diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index 2e2ffe7010aa..51c77f0e47b2 100644 ---- a/drivers/char/Kconfig -+++ b/drivers/char/Kconfig -@@ -351,7 +351,7 @@ config XILINX_HWICAP - - config R3964 - tristate "Siemens R3964 line discipline" -- depends on TTY -+ depends on TTY && BROKEN - ---help--- - This driver allows synchronous communication with devices using the - Siemens R3964 packet protocol. 
Unless you are dealing with special -diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c -index 258c8d259ea1..f965845917e3 100644 ---- a/drivers/clk/meson/meson-aoclk.c -+++ b/drivers/clk/meson/meson-aoclk.c -@@ -65,20 +65,15 @@ int meson_aoclkc_probe(struct platform_device *pdev) - return ret; - } - -- /* Populate regmap */ -- for (clkid = 0; clkid < data->num_clks; clkid++) -+ /* -+ * Populate regmap and register all clks -+ */ -+ for (clkid = 0; clkid < data->num_clks; clkid++) { - data->clks[clkid]->map = regmap; - -- /* Register all clks */ -- for (clkid = 0; clkid < data->hw_data->num; clkid++) { -- if (!data->hw_data->hws[clkid]) -- continue; -- - ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]); -- if (ret) { -- dev_err(dev, "Clock registration failed\n"); -+ if (ret) - return ret; -- } - } - - return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, -diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c -index c7103dd2d8d5..563ab8590061 100644 ---- a/drivers/gpu/drm/i915/gvt/gtt.c -+++ b/drivers/gpu/drm/i915/gvt/gtt.c -@@ -1942,7 +1942,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) - */ - void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) - { -- atomic_dec(&mm->pincount); -+ atomic_dec_if_positive(&mm->pincount); - } - - /** -diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c -index 55bb7885e228..8fff49affc11 100644 ---- a/drivers/gpu/drm/i915/gvt/scheduler.c -+++ b/drivers/gpu/drm/i915/gvt/scheduler.c -@@ -1475,8 +1475,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, - intel_runtime_pm_put(dev_priv); - } - -- if (ret && (vgpu_is_vm_unhealthy(ret))) { -- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); -+ if (ret) { -+ if (vgpu_is_vm_unhealthy(ret)) -+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); - intel_vgpu_destroy_workload(workload); - return ERR_PTR(ret); - } -diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c -index 22a74608c6e4..dcd1df5322e8 100644 ---- a/drivers/gpu/drm/i915/intel_dp.c -+++ b/drivers/gpu/drm/i915/intel_dp.c -@@ -1845,42 +1845,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, - return false; - } - --/* Optimize link config in order: max bpp, min lanes, min clock */ --static bool --intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, -- struct intel_crtc_state *pipe_config, -- const struct link_config_limits *limits) --{ -- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; -- int bpp, clock, lane_count; -- int mode_rate, link_clock, link_avail; -- -- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { -- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, -- bpp); -- -- for (lane_count = limits->min_lane_count; -- lane_count <= limits->max_lane_count; -- lane_count <<= 1) { -- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { -- link_clock = intel_dp->common_rates[clock]; -- link_avail = intel_dp_max_data_rate(link_clock, -- lane_count); -- -- if (mode_rate <= link_avail) { -- pipe_config->lane_count = lane_count; -- pipe_config->pipe_bpp = bpp; -- pipe_config->port_clock = link_clock; -- -- return true; -- } -- } -- } -- } -- -- return false; --} -- - static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) - { - int i, num_bpc; -@@ -2013,15 +1977,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, - limits.min_bpp = 6 * 3; - limits.max_bpp = 
intel_dp_compute_bpp(intel_dp, pipe_config); - -- if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) { -+ if (intel_dp_is_edp(intel_dp)) { - /* - * Use the maximum clock and number of lanes the eDP panel -- * advertizes being capable of. The eDP 1.3 and earlier panels -- * are generally designed to support only a single clock and -- * lane configuration, and typically these values correspond to -- * the native resolution of the panel. With eDP 1.4 rate select -- * and DSC, this is decreasingly the case, and we need to be -- * able to select less than maximum link config. -+ * advertizes being capable of. The panels are generally -+ * designed to support only a single clock and lane -+ * configuration, and typically these values correspond to the -+ * native resolution of the panel. - */ - limits.min_lane_count = limits.max_lane_count; - limits.min_clock = limits.max_clock; -@@ -2035,22 +1997,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, - intel_dp->common_rates[limits.max_clock], - limits.max_bpp, adjusted_mode->crtc_clock); - -- if (intel_dp_is_edp(intel_dp)) -- /* -- * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 -- * section A.1: "It is recommended that the minimum number of -- * lanes be used, using the minimum link rate allowed for that -- * lane configuration." -- * -- * Note that we use the max clock and lane count for eDP 1.3 and -- * earlier, and fast vs. wide is irrelevant. -- */ -- ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, -- &limits); -- else -- /* Optimize for slow and wide. */ -- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, -- &limits); -+ /* -+ * Optimize for slow and wide. This is the place to add alternative -+ * optimization policy. -+ */ -+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); - - /* enable compression if the mode doesn't fit available BW */ - if (!ret) { -diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c -index dc47720c99ba..39d8509d96a0 100644 ---- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c -+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c -@@ -48,8 +48,13 @@ static enum drm_mode_status - sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector, - const struct drm_display_mode *mode) - { -- /* This is max for HDMI 2.0b (4K@60Hz) */ -- if (mode->clock > 594000) -+ /* -+ * Controller support maximum of 594 MHz, which correlates to -+ * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than -+ * 340 MHz scrambling has to be enabled. Because scrambling is -+ * not yet implemented, just limit to 340 MHz for now. 
-+ */ -+ if (mode->clock > 340000) - return MODE_CLOCK_HIGH; - - return MODE_OK; -diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c -index a63e3011e971..bd4f0b88bbd7 100644 ---- a/drivers/gpu/drm/udl/udl_drv.c -+++ b/drivers/gpu/drm/udl/udl_drv.c -@@ -51,6 +51,7 @@ static struct drm_driver driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, - .load = udl_driver_load, - .unload = udl_driver_unload, -+ .release = udl_driver_release, - - /* gem hooks */ - .gem_free_object_unlocked = udl_gem_free_object, -diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h -index e9e9b1ff678e..4ae67d882eae 100644 ---- a/drivers/gpu/drm/udl/udl_drv.h -+++ b/drivers/gpu/drm/udl/udl_drv.h -@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb); - - int udl_driver_load(struct drm_device *dev, unsigned long flags); - void udl_driver_unload(struct drm_device *dev); -+void udl_driver_release(struct drm_device *dev); - - int udl_fbdev_init(struct drm_device *dev); - void udl_fbdev_cleanup(struct drm_device *dev); -diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c -index 1b014d92855b..19055dda3140 100644 ---- a/drivers/gpu/drm/udl/udl_main.c -+++ b/drivers/gpu/drm/udl/udl_main.c -@@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev) - udl_free_urb_list(dev); - - udl_fbdev_cleanup(dev); -- udl_modeset_cleanup(dev); - kfree(udl); - } -+ -+void udl_driver_release(struct drm_device *dev) -+{ -+ udl_modeset_cleanup(dev); -+ drm_dev_fini(dev); -+ kfree(dev); -+} -diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c -index f39a183d59c2..e7e946035027 100644 ---- a/drivers/gpu/drm/virtio/virtgpu_object.c -+++ b/drivers/gpu/drm/virtio/virtgpu_object.c -@@ -28,10 +28,21 @@ - static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, - uint32_t *resid) - { -+#if 0 - int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); - - if (handle < 0) - return handle; -+#else -+ static int handle; -+ -+ /* -+ * FIXME: dirty hack to avoid re-using IDs, virglrenderer -+ * can't deal with that. Needs fixing in virglrenderer, also -+ * should figure a better way to handle that in the guest. 
-+ */ -+ handle++; -+#endif - - *resid = handle + 1; - return 0; -@@ -39,7 +50,9 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, - - static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) - { -+#if 0 - ida_free(&vgdev->resource_ida, id - 1); -+#endif - } - - static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) -diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c -index 15ed6177a7a3..f040c8a7f9a9 100644 ---- a/drivers/hid/hid-logitech-hidpp.c -+++ b/drivers/hid/hid-logitech-hidpp.c -@@ -2608,8 +2608,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) - input_report_rel(mydata->input, REL_Y, v); - - v = hid_snto32(data[6], 8); -- hidpp_scroll_counter_handle_scroll( -- &hidpp->vertical_wheel_counter, v); -+ if (v != 0) -+ hidpp_scroll_counter_handle_scroll( -+ &hidpp->vertical_wheel_counter, v); - - input_sync(mydata->input); - } -diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 6f929bfa9fcd..d0f1dfe2bcbb 100644 ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -1759,6 +1759,7 @@ config SENSORS_VT8231 - config SENSORS_W83773G - tristate "Nuvoton W83773G" - depends on I2C -+ select REGMAP_I2C - help - If you say yes here you get support for the Nuvoton W83773G hardware - monitoring chip. -diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c -index 391118c8aae8..c888f4aca45c 100644 ---- a/drivers/hwmon/occ/common.c -+++ b/drivers/hwmon/occ/common.c -@@ -889,6 +889,8 @@ static int occ_setup_sensor_attrs(struct occ *occ) - s++; - } - } -+ -+ s = (sensors->power.num_sensors * 4) + 1; - } else { - for (i = 0; i < sensors->power.num_sensors; ++i) { - s = i + 1; -@@ -917,11 +919,11 @@ static int occ_setup_sensor_attrs(struct occ *occ) - show_power, NULL, 3, i); - attr++; - } -- } - -- if (sensors->caps.num_sensors >= 1) { - s = sensors->power.num_sensors + 1; -+ } - -+ if (sensors->caps.num_sensors >= 1) { - snprintf(attr->name, sizeof(attr->name), "power%d_label", s); - attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, - 0, 0); -diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c -index 4ee32964e1dd..948eb6e25219 100644 ---- a/drivers/infiniband/hw/mlx5/odp.c -+++ b/drivers/infiniband/hw/mlx5/odp.c -@@ -560,7 +560,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, - struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); - bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; - bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; -- u64 access_mask = ODP_READ_ALLOWED_BIT; -+ u64 access_mask; - u64 start_idx, page_mask; - struct ib_umem_odp *odp; - size_t size; -@@ -582,6 +582,7 @@ next_mr: - page_shift = mr->umem->page_shift; - page_mask = ~(BIT(page_shift) - 1); - start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; -+ access_mask = ODP_READ_ALLOWED_BIT; - - if (prefetch && !downgrade && !mr->umem->writable) { - /* prefetch with write-access must -diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h -index 95c6d86ab5e8..c4ef1fceead6 100644 ---- a/drivers/md/dm-core.h -+++ b/drivers/md/dm-core.h -@@ -115,6 +115,7 @@ struct mapped_device { - struct srcu_struct io_barrier; - }; - -+void disable_discard(struct mapped_device *md); - void disable_write_same(struct mapped_device *md); - void disable_write_zeroes(struct mapped_device *md); - -diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c -index 2e823252d797..f535fd8ac82d 100644 ---- 
a/drivers/md/dm-integrity.c -+++ b/drivers/md/dm-integrity.c -@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig - static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) - { - return range1->logical_sector < range2->logical_sector + range2->n_sectors && -- range2->logical_sector + range2->n_sectors > range2->logical_sector; -+ range1->logical_sector + range1->n_sectors > range2->logical_sector; - } - - static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) -@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity - struct dm_integrity_range *last_range = - list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); - struct task_struct *last_range_task; -- if (!ranges_overlap(range, last_range)) -- break; - last_range_task = last_range->task; - list_del(&last_range->wait_entry); - if (!add_new_range(ic, last_range, false)) { -@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) - journal_watermark = val; - else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) - sync_msec = val; -- else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) { -+ else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { - if (ic->meta_dev) { - dm_put_device(ti, ic->meta_dev); - ic->meta_dev = NULL; -@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) - goto bad; - } - ic->sectors_per_block = val >> SECTOR_SHIFT; -- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { -+ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { - r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, - "Invalid internal_hash argument"); - if (r) - goto bad; -- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { -+ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { - r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, - "Invalid journal_crypt argument"); - if (r) - goto bad; -- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { -+ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { - r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, - "Invalid journal_mac argument"); - if (r) -diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c -index a20531e5f3b4..582265e043a6 100644 ---- a/drivers/md/dm-rq.c -+++ b/drivers/md/dm-rq.c -@@ -206,11 +206,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) - } - - if (unlikely(error == BLK_STS_TARGET)) { -- if (req_op(clone) == REQ_OP_WRITE_SAME && -- !clone->q->limits.max_write_same_sectors) -+ if (req_op(clone) == REQ_OP_DISCARD && -+ !clone->q->limits.max_discard_sectors) -+ disable_discard(tio->md); -+ else if (req_op(clone) == REQ_OP_WRITE_SAME && -+ !clone->q->limits.max_write_same_sectors) - disable_write_same(tio->md); -- if (req_op(clone) == REQ_OP_WRITE_ZEROES && -- !clone->q->limits.max_write_zeroes_sectors) -+ else if (req_op(clone) == REQ_OP_WRITE_ZEROES && -+ !clone->q->limits.max_write_zeroes_sectors) - disable_write_zeroes(tio->md); - } - -diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c -index 4b1be754cc41..eb257e4dcb1c 100644 ---- a/drivers/md/dm-table.c -+++ b/drivers/md/dm-table.c -@@ -1852,6 +1852,36 @@ static 
bool dm_table_supports_secure_erase(struct dm_table *t) - return true; - } - -+static int device_requires_stable_pages(struct dm_target *ti, -+ struct dm_dev *dev, sector_t start, -+ sector_t len, void *data) -+{ -+ struct request_queue *q = bdev_get_queue(dev->bdev); -+ -+ return q && bdi_cap_stable_pages_required(q->backing_dev_info); -+} -+ -+/* -+ * If any underlying device requires stable pages, a table must require -+ * them as well. Only targets that support iterate_devices are considered: -+ * don't want error, zero, etc to require stable pages. -+ */ -+static bool dm_table_requires_stable_pages(struct dm_table *t) -+{ -+ struct dm_target *ti; -+ unsigned i; -+ -+ for (i = 0; i < dm_table_get_num_targets(t); i++) { -+ ti = dm_table_get_target(t, i); -+ -+ if (ti->type->iterate_devices && -+ ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) -+ return true; -+ } -+ -+ return false; -+} -+ - void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, - struct queue_limits *limits) - { -@@ -1909,6 +1939,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, - - dm_table_verify_integrity(t); - -+ /* -+ * Some devices don't use blk_integrity but still want stable pages -+ * because they do their own checksumming. -+ */ -+ if (dm_table_requires_stable_pages(t)) -+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; -+ else -+ q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; -+ - /* - * Determine whether or not this queue's I/O timings contribute - * to the entropy pool, Only request-based targets use this. -diff --git a/drivers/md/dm.c b/drivers/md/dm.c -index 515e6af9bed2..4986eea520b6 100644 ---- a/drivers/md/dm.c -+++ b/drivers/md/dm.c -@@ -963,6 +963,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error) - } - } - -+void disable_discard(struct mapped_device *md) -+{ -+ struct queue_limits *limits = dm_get_queue_limits(md); -+ -+ /* device doesn't really support DISCARD, disable it */ -+ limits->max_discard_sectors = 0; -+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); -+} -+ - void disable_write_same(struct mapped_device *md) - { - struct queue_limits *limits = dm_get_queue_limits(md); -@@ -988,11 +997,14 @@ static void clone_endio(struct bio *bio) - dm_endio_fn endio = tio->ti->type->end_io; - - if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { -- if (bio_op(bio) == REQ_OP_WRITE_SAME && -- !bio->bi_disk->queue->limits.max_write_same_sectors) -+ if (bio_op(bio) == REQ_OP_DISCARD && -+ !bio->bi_disk->queue->limits.max_discard_sectors) -+ disable_discard(md); -+ else if (bio_op(bio) == REQ_OP_WRITE_SAME && -+ !bio->bi_disk->queue->limits.max_write_same_sectors) - disable_write_same(md); -- if (bio_op(bio) == REQ_OP_WRITE_ZEROES && -- !bio->bi_disk->queue->limits.max_write_zeroes_sectors) -+ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && -+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors) - disable_write_zeroes(md); - } - -@@ -1060,15 +1072,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) - return -EINVAL; - } - -- /* -- * BIO based queue uses its own splitting. When multipage bvecs -- * is switched on, size of the incoming bio may be too big to -- * be handled in some targets, such as crypt. -- * -- * When these targets are ready for the big bio, we can remove -- * the limit. 
-- */ -- ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); -+ ti->max_io_len = (uint32_t) len; - - return 0; - } -diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c -index 82a97866e0cf..7c8f203f9a24 100644 ---- a/drivers/mmc/host/alcor.c -+++ b/drivers/mmc/host/alcor.c -@@ -48,7 +48,6 @@ struct alcor_sdmmc_host { - struct mmc_command *cmd; - struct mmc_data *data; - unsigned int dma_on:1; -- unsigned int early_data:1; - - struct mutex cmd_mutex; - -@@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host) - host->sg_count--; - } - --static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, -- bool early) -+static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host) - { - struct alcor_pci_priv *priv = host->alcor_pci; - struct mmc_data *data = host->data; -@@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, - ctrl |= AU6601_DATA_WRITE; - - if (data->host_cookie == COOKIE_MAPPED) { -- if (host->early_data) { -- host->early_data = false; -- return; -- } -- -- host->early_data = early; -- - alcor_data_set_dma(host); - ctrl |= AU6601_DATA_DMA_MODE; - host->dma_on = 1; -@@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host) - static void alcor_prepare_data(struct alcor_sdmmc_host *host, - struct mmc_command *cmd) - { -+ struct alcor_pci_priv *priv = host->alcor_pci; - struct mmc_data *data = cmd->data; - - if (!data) -@@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host, - if (data->host_cookie != COOKIE_MAPPED) - alcor_prepare_sg_miter(host); - -- alcor_trigger_data_transfer(host, true); -+ alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL); - } - - static void alcor_send_cmd(struct alcor_sdmmc_host *host, -@@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask) - if (!host->data) - return false; - -- alcor_trigger_data_transfer(host, false); -+ alcor_trigger_data_transfer(host); - host->cmd = NULL; - return true; - } -@@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask) - if (!host->data) - alcor_request_complete(host, 1); - else -- alcor_trigger_data_transfer(host, false); -+ alcor_trigger_data_transfer(host); - host->cmd = NULL; - } - -@@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) - break; - case AU6601_INT_READ_BUF_RDY: - alcor_trf_block_pio(host, true); -- if (!host->blocks) -- break; -- alcor_trigger_data_transfer(host, false); - return 1; - case AU6601_INT_WRITE_BUF_RDY: - alcor_trf_block_pio(host, false); -- if (!host->blocks) -- break; -- alcor_trigger_data_transfer(host, false); - return 1; - case AU6601_INT_DMA_END: - if (!host->sg_count) -@@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) - break; - } - -- if (intmask & AU6601_INT_DATA_END) -- return 0; -+ if (intmask & AU6601_INT_DATA_END) { -+ if (!host->dma_on && host->blocks) { -+ alcor_trigger_data_transfer(host); -+ return 1; -+ } else { -+ return 0; -+ } -+ } - - return 1; - } -diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c -index c11c18a9aacb..9ec300ec94ba 100644 ---- a/drivers/mmc/host/sdhci-omap.c -+++ b/drivers/mmc/host/sdhci-omap.c -@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask) - sdhci_reset(host, mask); - } - -+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\ -+ SDHCI_INT_TIMEOUT) -+#define 
CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE) -+ -+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) -+{ -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); -+ -+ if (omap_host->is_tuning && host->cmd && !host->data_early && -+ (intmask & CMD_ERR_MASK)) { -+ -+ /* -+ * Since we are not resetting data lines during tuning -+ * operation, data error or data complete interrupts -+ * might still arrive. Mark this request as a failure -+ * but still wait for the data interrupt -+ */ -+ if (intmask & SDHCI_INT_TIMEOUT) -+ host->cmd->error = -ETIMEDOUT; -+ else -+ host->cmd->error = -EILSEQ; -+ -+ host->cmd = NULL; -+ -+ /* -+ * Sometimes command error interrupts and command complete -+ * interrupt will arrive together. Clear all command related -+ * interrupts here. -+ */ -+ sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS); -+ intmask &= ~CMD_MASK; -+ } -+ -+ return intmask; -+} -+ - static struct sdhci_ops sdhci_omap_ops = { - .set_clock = sdhci_omap_set_clock, - .set_power = sdhci_omap_set_power, -@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = { - .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, - .reset = sdhci_omap_reset, - .set_uhs_signaling = sdhci_omap_set_uhs_signaling, -+ .irq = sdhci_omap_irq, - }; - - static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) -diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c -index 803f7990d32b..40ca339ec3df 100644 ---- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c -+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c -@@ -1129,6 +1129,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, - tpa_info = &rxr->rx_tpa[agg_id]; - - if (unlikely(cons != rxr->rx_next_cons)) { -+ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", -+ cons, rxr->rx_next_cons); - bnxt_sched_reset(bp, rxr); - return; - } -@@ -1581,15 +1583,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - } - - cons = rxcmp->rx_cmp_opaque; -- rx_buf = &rxr->rx_buf_ring[cons]; -- data = rx_buf->data; -- data_ptr = rx_buf->data_ptr; - if (unlikely(cons != rxr->rx_next_cons)) { - int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); - -+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", -+ cons, rxr->rx_next_cons); - bnxt_sched_reset(bp, rxr); - return rc1; - } -+ rx_buf = &rxr->rx_buf_ring[cons]; -+ data = rx_buf->data; -+ data_ptr = rx_buf->data_ptr; - prefetch(data_ptr); - - misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); -@@ -1606,11 +1610,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - - rx_buf->data = NULL; - if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { -+ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); -+ - bnxt_reuse_rx_data(rxr, cons, data); - if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); - - rc = -EIO; -+ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { -+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); -+ bnxt_sched_reset(bp, rxr); -+ } - goto next_rx; - } - -diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c -index 503cfadff4ac..d4ee9f9c8c34 100644 ---- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c -+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c -@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev) - struct nicvf_cq_poll *cq_poll = NULL; - union nic_mbx mbx = {}; - -- 
cancel_delayed_work_sync(&nic->link_change_work); -- - /* wait till all queued set_rx_mode tasks completes */ -- drain_workqueue(nic->nicvf_rx_mode_wq); -+ if (nic->nicvf_rx_mode_wq) { -+ cancel_delayed_work_sync(&nic->link_change_work); -+ drain_workqueue(nic->nicvf_rx_mode_wq); -+ } - - mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; - nicvf_send_msg_to_pf(nic, &mbx); -@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev) - struct nicvf_cq_poll *cq_poll = NULL; - - /* wait till all queued set_rx_mode tasks completes if any */ -- drain_workqueue(nic->nicvf_rx_mode_wq); -+ if (nic->nicvf_rx_mode_wq) -+ drain_workqueue(nic->nicvf_rx_mode_wq); - - netif_carrier_off(netdev); - -@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev) - /* Send VF config done msg to PF */ - nicvf_send_cfg_done(nic); - -- INIT_DELAYED_WORK(&nic->link_change_work, -- nicvf_link_status_check_task); -- queue_delayed_work(nic->nicvf_rx_mode_wq, -- &nic->link_change_work, 0); -+ if (nic->nicvf_rx_mode_wq) { -+ INIT_DELAYED_WORK(&nic->link_change_work, -+ nicvf_link_status_check_task); -+ queue_delayed_work(nic->nicvf_rx_mode_wq, -+ &nic->link_change_work, 0); -+ } - - return 0; - cleanup: -diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c -index 5ecbb1adcf3b..51cfe95f3e24 100644 ---- a/drivers/net/ethernet/ibm/ibmvnic.c -+++ b/drivers/net/ethernet/ibm/ibmvnic.c -@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, - */ - adapter->state = VNIC_PROBED; - -+ reinit_completion(&adapter->init_done); - rc = init_crq_queue(adapter); - if (rc) { - netdev_err(adapter->netdev, -@@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) - old_num_rx_queues = adapter->req_rx_queues; - old_num_tx_queues = adapter->req_tx_queues; - -- init_completion(&adapter->init_done); -+ reinit_completion(&adapter->init_done); - adapter->init_done_rc = 0; - ibmvnic_send_crq_init(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { -@@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) - - adapter->from_passive_init = false; - -- init_completion(&adapter->init_done); - adapter->init_done_rc = 0; - ibmvnic_send_crq_init(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { -@@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) - INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); - INIT_LIST_HEAD(&adapter->rwi_list); - spin_lock_init(&adapter->rwi_lock); -+ init_completion(&adapter->init_done); - adapter->resetting = false; - - adapter->mac_change_pending = false; -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c -index eac245a93f91..4ab0d030b544 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c -@@ -122,7 +122,9 @@ out: - return err; - } - --/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ -+/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) -+ * minimum speed value is 40Gbps -+ */ - static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) - { - u32 speed; -@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) - int err; - - err = mlx5e_port_linkspeed(priv->mdev, &speed); -- if (err) { -- mlx5_core_warn(priv->mdev, "cannot get port speed\n"); -- return 0; -- } -+ if (err) -+ speed = 
SPEED_40000; -+ speed = max_t(u32, speed, SPEED_40000); - - xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; - -@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) - } - - static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, -- u32 xoff, unsigned int mtu) -+ u32 xoff, unsigned int max_mtu) - { - int i; - -@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - } - - if (port_buffer->buffer[i].size < -- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) -+ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) - return -ENOMEM; - - port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; -- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; -+ port_buffer->buffer[i].xon = -+ port_buffer->buffer[i].xoff - max_mtu; - } - - return 0; -@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - - /** - * update_buffer_lossy() -- * mtu: device's MTU -+ * max_mtu: netdev's max_mtu - * pfc_en: current pfc configuration - * buffer: current prio to buffer mapping - * xoff: xoff value -@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - * Return 0 if no error. - * Set change to true if buffer configuration is modified. - */ --static int update_buffer_lossy(unsigned int mtu, -+static int update_buffer_lossy(unsigned int max_mtu, - u8 pfc_en, u8 *buffer, u32 xoff, - struct mlx5e_port_buffer *port_buffer, - bool *change) -@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu, - } - - if (changed) { -- err = update_xoff_threshold(port_buffer, xoff, mtu); -+ err = update_xoff_threshold(port_buffer, xoff, max_mtu); - if (err) - return err; - -@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu, - return 0; - } - -+#define MINIMUM_MAX_MTU 9216 - int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - u32 change, unsigned int mtu, - struct ieee_pfc *pfc, -@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - bool update_prio2buffer = false; - u8 buffer[MLX5E_MAX_PRIORITY]; - bool update_buffer = false; -+ unsigned int max_mtu; - u32 total_used = 0; - u8 curr_pfc_en; - int err; - int i; - - mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); -+ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU); - - err = mlx5e_port_query_buffer(priv, &port_buffer); - if (err) -@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - - if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { - update_buffer = true; -- err = update_xoff_threshold(&port_buffer, xoff, mtu); -+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu); - if (err) - return err; - } -@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - if (err) - return err; - -- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, -+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, - &port_buffer, &update_buffer); - if (err) - return err; -@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - if (err) - return err; - -- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, -- &port_buffer, &update_buffer); -+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, -+ xoff, &port_buffer, &update_buffer); - if (err) - return err; - } -@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - return -EINVAL; - - update_buffer = true; 
-- err = update_xoff_threshold(&port_buffer, xoff, mtu); -+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu); - if (err) - return err; - } -@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, - /* Need to update buffer configuration if xoff value is changed */ - if (!update_buffer && xoff != priv->dcbx.xoff) { - update_buffer = true; -- err = update_xoff_threshold(&port_buffer, xoff, mtu); -+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu); - if (err) - return err; - } -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c -index 3078491cc0d0..1539cf3de5dc 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c -@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, - if (err) - return err; - -+ mutex_lock(&mdev->mlx5e_res.td.list_lock); - list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); -+ mutex_unlock(&mdev->mlx5e_res.td.list_lock); - - return 0; - } -@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, - void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, - struct mlx5e_tir *tir) - { -+ mutex_lock(&mdev->mlx5e_res.td.list_lock); - mlx5_core_destroy_tir(mdev, tir->tirn); - list_del(&tir->list); -+ mutex_unlock(&mdev->mlx5e_res.td.list_lock); - } - - static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, -@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) - } - - INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); -+ mutex_init(&mdev->mlx5e_res.td.list_lock); - - return 0; - -@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) - { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_tir *tir; -- int err = -ENOMEM; -+ int err = 0; - u32 tirn = 0; - int inlen; - void *in; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); -- if (!in) -+ if (!in) { -+ err = -ENOMEM; - goto out; -+ } - - if (enable_uc_lb) - MLX5_SET(modify_tir_in, in, ctx.self_lb_block, -@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) - - MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - -+ mutex_lock(&mdev->mlx5e_res.td.list_lock); - list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { - tirn = tir->tirn; - err = mlx5_core_modify_tir(mdev, tirn, in, inlen); -@@ -168,6 +176,7 @@ out: - kvfree(in); - if (err) - netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); -+ mutex_unlock(&mdev->mlx5e_res.td.list_lock); - - return err; - } -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c -index 5cf5f2a9d51f..8de64e88c670 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c -@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, - void *cmd; - int ret; - -+ rcu_read_lock(); -+ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); -+ rcu_read_unlock(); -+ -+ if (!flow) { -+ WARN_ONCE(1, "Received NULL pointer for handle\n"); -+ return -EINVAL; -+ } -+ - buf = kzalloc(size, GFP_ATOMIC); - if (!buf) - return -ENOMEM; - - cmd = (buf + 1); - -- rcu_read_lock(); -- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); -- rcu_read_unlock(); - mlx5_fpga_tls_flow_to_cmd(flow, cmd); - - MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); -@@ -238,6 +244,8 @@ int 
mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, - buf->complete = mlx_tls_kfree_complete; - - ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); -+ if (ret < 0) -+ kfree(buf); - - return ret; - } -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c -index be81b319b0dc..694edd899322 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/main.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c -@@ -163,26 +163,6 @@ static struct mlx5_profile profile[] = { - .size = 8, - .limit = 4 - }, -- .mr_cache[16] = { -- .size = 8, -- .limit = 4 -- }, -- .mr_cache[17] = { -- .size = 8, -- .limit = 4 -- }, -- .mr_cache[18] = { -- .size = 8, -- .limit = 4 -- }, -- .mr_cache[19] = { -- .size = 4, -- .limit = 2 -- }, -- .mr_cache[20] = { -- .size = 4, -- .limit = 2 -- }, - }, - }; - -diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c -index 69d7aebda09b..73db94e55fd0 100644 ---- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c -+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c -@@ -196,7 +196,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) - ret = dev_queue_xmit(skb); - nfp_repr_inc_tx_stats(netdev, len, ret); - -- return ret; -+ return NETDEV_TX_OK; - } - - static int nfp_repr_stop(struct net_device *netdev) -@@ -384,7 +384,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, - netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); - netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; - -- netdev->priv_flags |= IFF_NO_QUEUE; -+ netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; - netdev->features |= NETIF_F_LLTX; - - if (nfp_app_has_tc(app)) { -diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c -index f55d177ae894..365cddbfc684 100644 ---- a/drivers/net/ethernet/realtek/r8169.c -+++ b/drivers/net/ethernet/realtek/r8169.c -@@ -28,6 +28,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -5332,7 +5333,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) - tp->cp_cmd |= PktCntrDisable | INTT_1; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - -- RTL_W16(tp, IntrMitigate, 0x5151); -+ RTL_W16(tp, IntrMitigate, 0x5100); - - /* Work around for RxFIFO overflow. */ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { -@@ -7224,6 +7225,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) - return rc; - } - -+ /* Disable ASPM completely as that cause random device stop working -+ * problems as well as full system hangs for some PCIe devices users. -+ */ -+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); -+ - /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ - rc = pcim_enable_device(pdev); - if (rc < 0) { -diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h -index e859ae2e42d5..49f41b64077b 100644 ---- a/drivers/net/hyperv/hyperv_net.h -+++ b/drivers/net/hyperv/hyperv_net.h -@@ -987,6 +987,7 @@ struct netvsc_device { - - wait_queue_head_t wait_drain; - bool destroy; -+ bool tx_disable; /* if true, do not wake up queue again */ - - /* Receive buffer allocated by us but manages by NetVSP */ - void *recv_buf; -diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c -index 813d195bbd57..e0dce373cdd9 100644 ---- a/drivers/net/hyperv/netvsc.c -+++ b/drivers/net/hyperv/netvsc.c -@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) - - init_waitqueue_head(&net_device->wait_drain); - net_device->destroy = false; -+ net_device->tx_disable = false; - - net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; - net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; -@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, - } else { - struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); - -- if (netif_tx_queue_stopped(txq) && -+ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && - (hv_get_avail_to_write_percent(&channel->outbound) > - RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { - netif_tx_wake_queue(txq); -@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt( - } else if (ret == -EAGAIN) { - netif_tx_stop_queue(txq); - ndev_ctx->eth_stats.stop_queue++; -- if (atomic_read(&nvchan->queue_sends) < 1) { -+ if (atomic_read(&nvchan->queue_sends) < 1 && -+ !net_device->tx_disable) { - netif_tx_wake_queue(txq); - ndev_ctx->eth_stats.wake_queue++; - ret = -ENOSPC; -diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c -index cf4897043e83..b20fb0fb595b 100644 ---- a/drivers/net/hyperv/netvsc_drv.c -+++ b/drivers/net/hyperv/netvsc_drv.c -@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net) - rcu_read_unlock(); - } - -+static void netvsc_tx_enable(struct netvsc_device *nvscdev, -+ struct net_device *ndev) -+{ -+ nvscdev->tx_disable = false; -+ virt_wmb(); /* ensure queue wake up mechanism is on */ -+ -+ netif_tx_wake_all_queues(ndev); -+} -+ - static int netvsc_open(struct net_device *net) - { - struct net_device_context *ndev_ctx = netdev_priv(net); -@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) - rdev = nvdev->extension; - if (!rdev->link_state) { - netif_carrier_on(net); -- netif_tx_wake_all_queues(net); -+ netvsc_tx_enable(nvdev, net); - } - - if (vf_netdev) { -@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) - } - } - -+static void netvsc_tx_disable(struct netvsc_device *nvscdev, -+ struct net_device *ndev) -+{ -+ if (nvscdev) { -+ nvscdev->tx_disable = true; -+ virt_wmb(); /* ensure txq will not wake up after stop */ -+ } -+ -+ netif_tx_disable(ndev); -+} -+ - static int netvsc_close(struct net_device *net) - { - struct net_device_context *net_device_ctx = netdev_priv(net); -@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - int ret; - -- netif_tx_disable(net); -+ netvsc_tx_disable(nvdev, net); - - /* No need to close rndis filter if it is removed already */ - if (!nvdev) -@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev, - - /* If device was up (receiving) then shutdown */ - if (netif_running(ndev)) { -- 
netif_tx_disable(ndev); -+ netvsc_tx_disable(nvdev, ndev); - - ret = rndis_filter_close(nvdev); - if (ret) { -@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w) - if (rdev->link_state) { - rdev->link_state = false; - netif_carrier_on(net); -- netif_tx_wake_all_queues(net); -+ netvsc_tx_enable(net_device, net); - } else { - notify = true; - } -@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w) - if (!rdev->link_state) { - rdev->link_state = true; - netif_carrier_off(net); -- netif_tx_stop_all_queues(net); -+ netvsc_tx_disable(net_device, net); - } - kfree(event); - break; -@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w) - if (!rdev->link_state) { - rdev->link_state = true; - netif_carrier_off(net); -- netif_tx_stop_all_queues(net); -+ netvsc_tx_disable(net_device, net); - event->event = RNDIS_STATUS_MEDIA_CONNECT; - spin_lock_irqsave(&ndev_ctx->lock, flags); - list_add(&event->list, &ndev_ctx->reconfig_events); -diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c -index 74bebbdb4b15..9195f3476b1d 100644 ---- a/drivers/net/usb/qmi_wwan.c -+++ b/drivers/net/usb/qmi_wwan.c -@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = { - {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ - {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ - {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ -+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ - {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ - {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ - {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ -diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c -index 6d1a1abbed27..cd15c32b2e43 100644 ---- a/drivers/net/vrf.c -+++ b/drivers/net/vrf.c -@@ -1275,8 +1275,12 @@ static void vrf_setup(struct net_device *dev) - dev->priv_flags |= IFF_NO_QUEUE; - dev->priv_flags |= IFF_NO_RX_HANDLER; - -- dev->min_mtu = 0; -- dev->max_mtu = 0; -+ /* VRF devices do not care about MTU, but if the MTU is set -+ * too low then the ipv4 and ipv6 protocols are disabled -+ * which breaks networking. -+ */ -+ dev->min_mtu = IPV6_MIN_MTU; -+ dev->max_mtu = ETH_MAX_MTU; - } - - static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], -diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c -index 3f3df4c29f6e..905282a8ddaa 100644 ---- a/drivers/pci/hotplug/pciehp_ctrl.c -+++ b/drivers/pci/hotplug/pciehp_ctrl.c -@@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal) - * removed from the slot/adapter. 
- */ - msleep(1000); -+ -+ /* Ignore link or presence changes caused by power off */ -+ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), -+ &ctrl->pending_events); - } - - /* turn off Green LED */ -diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c -index e2a879e93d86..fba03a7d5c7f 100644 ---- a/drivers/pci/quirks.c -+++ b/drivers/pci/quirks.c -@@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, - /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, - quirk_dma_func1_alias); -+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170, -+ quirk_dma_func1_alias); - /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, - quirk_dma_func1_alias); -diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig -index 0840d27381ea..e0a04bfc873e 100644 ---- a/drivers/tty/Kconfig -+++ b/drivers/tty/Kconfig -@@ -441,4 +441,28 @@ config VCC - depends on SUN_LDOMS - help - Support for Sun logical domain consoles. -+ -+config LDISC_AUTOLOAD -+ bool "Automatically load TTY Line Disciplines" -+ default y -+ help -+ Historically the kernel has always automatically loaded any -+ line discipline that is in a kernel module when a user asks -+ for it to be loaded with the TIOCSETD ioctl, or through other -+ means. This is not always the best thing to do on systems -+ where you know you will not be using some of the more -+ "ancient" line disciplines, so prevent the kernel from doing -+ this unless the request is coming from a process with the -+ CAP_SYS_MODULE permissions. -+ -+ Say 'Y' here if you trust your userspace users to do the right -+ thing, or if you have only provided the line disciplines that -+ you know you will be using, or if you wish to continue to use -+ the traditional method of on-demand loading of these modules -+ by any user. -+ -+ This functionality can be changed at runtime with the -+ dev.tty.ldisc_autoload sysctl, this configuration option will -+ only set the default value of this functionality. 
-+ - endif # TTY -diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c -index 21ffcce16927..5fa250157025 100644 ---- a/drivers/tty/tty_io.c -+++ b/drivers/tty/tty_io.c -@@ -513,6 +513,8 @@ static const struct file_operations hung_up_tty_fops = { - static DEFINE_SPINLOCK(redirect_lock); - static struct file *redirect; - -+extern void tty_sysctl_init(void); -+ - /** - * tty_wakeup - request more data - * @tty: terminal -@@ -3483,6 +3485,7 @@ void console_sysfs_notify(void) - */ - int __init tty_init(void) - { -+ tty_sysctl_init(); - cdev_init(&tty_cdev, &tty_fops); - if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) || - register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0) -diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c -index 45eda69b150c..e38f104db174 100644 ---- a/drivers/tty/tty_ldisc.c -+++ b/drivers/tty/tty_ldisc.c -@@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops) - * takes tty_ldiscs_lock to guard against ldisc races - */ - -+#if defined(CONFIG_LDISC_AUTOLOAD) -+ #define INITIAL_AUTOLOAD_STATE 1 -+#else -+ #define INITIAL_AUTOLOAD_STATE 0 -+#endif -+static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE; -+ - static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) - { - struct tty_ldisc *ld; -@@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) - */ - ldops = get_ldops(disc); - if (IS_ERR(ldops)) { -+ if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload) -+ return ERR_PTR(-EPERM); - request_module("tty-ldisc-%d", disc); - ldops = get_ldops(disc); - if (IS_ERR(ldops)) -@@ -845,3 +854,41 @@ void tty_ldisc_deinit(struct tty_struct *tty) - tty_ldisc_put(tty->ldisc); - tty->ldisc = NULL; - } -+ -+static int zero; -+static int one = 1; -+static struct ctl_table tty_table[] = { -+ { -+ .procname = "ldisc_autoload", -+ .data = &tty_ldisc_autoload, -+ .maxlen = sizeof(tty_ldisc_autoload), -+ .mode = 0644, -+ .proc_handler = proc_dointvec, -+ .extra1 = &zero, -+ .extra2 = &one, -+ }, -+ { } -+}; -+ -+static struct ctl_table tty_dir_table[] = { -+ { -+ .procname = "tty", -+ .mode = 0555, -+ .child = tty_table, -+ }, -+ { } -+}; -+ -+static struct ctl_table tty_root_table[] = { -+ { -+ .procname = "dev", -+ .mode = 0555, -+ .child = tty_dir_table, -+ }, -+ { } -+}; -+ -+void tty_sysctl_init(void) -+{ -+ register_sysctl_table(tty_root_table); -+} -diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c -index a0b07c331255..a38b65b97be0 100644 ---- a/drivers/virtio/virtio_ring.c -+++ b/drivers/virtio/virtio_ring.c -@@ -871,6 +871,8 @@ static struct virtqueue *vring_create_virtqueue_split( - GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO); - if (queue) - break; -+ if (!may_reduce_num) -+ return NULL; - } - - if (!num) -diff --git a/fs/block_dev.c b/fs/block_dev.c -index 58a4c1217fa8..06ef48ad1998 100644 ---- a/fs/block_dev.c -+++ b/fs/block_dev.c -@@ -298,10 +298,10 @@ static void blkdev_bio_end_io(struct bio *bio) - struct blkdev_dio *dio = bio->bi_private; - bool should_dirty = dio->should_dirty; - -- if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) { -- if (bio->bi_status && !dio->bio.bi_status) -- dio->bio.bi_status = bio->bi_status; -- } else { -+ if (bio->bi_status && !dio->bio.bi_status) -+ dio->bio.bi_status = bio->bi_status; -+ -+ if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) { - if (!dio->is_sync) { - struct kiocb *iocb = dio->iocb; - ssize_t ret; -diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c -index 6e1119496721..1d64a6b8e413 100644 ---- 
a/fs/btrfs/ioctl.c -+++ b/fs/btrfs/ioctl.c -@@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - -+ /* -+ * If the fs is mounted with nologreplay, which requires it to be -+ * mounted in RO mode as well, we can not allow discard on free space -+ * inside block groups, because log trees refer to extents that are not -+ * pinned in a block group's free space cache (pinning the extents is -+ * precisely the first phase of replaying a log tree). -+ */ -+ if (btrfs_test_opt(fs_info, NOLOGREPLAY)) -+ return -EROFS; -+ - rcu_read_lock(); - list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, - dev_list) { -diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c -index dc6140013ae8..61d22a56c0ba 100644 ---- a/fs/btrfs/props.c -+++ b/fs/btrfs/props.c -@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, - - static int prop_compression_validate(const char *value, size_t len) - { -- if (!strncmp("lzo", value, len)) -+ if (!strncmp("lzo", value, 3)) - return 0; -- else if (!strncmp("zlib", value, len)) -+ else if (!strncmp("zlib", value, 4)) - return 0; -- else if (!strncmp("zstd", value, len)) -+ else if (!strncmp("zstd", value, 4)) - return 0; - - return -EINVAL; -@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode, - btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); - } else if (!strncmp("zlib", value, 4)) { - type = BTRFS_COMPRESS_ZLIB; -- } else if (!strncmp("zstd", value, len)) { -+ } else if (!strncmp("zstd", value, 4)) { - type = BTRFS_COMPRESS_ZSTD; - btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); - } else { -diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c -index f2c0d863fb52..07cad54b84f1 100644 ---- a/fs/cifs/cifsfs.c -+++ b/fs/cifs/cifsfs.c -@@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) - tcon->ses->server->echo_interval / HZ); - if (tcon->snapshot_time) - seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); -+ if (tcon->handle_timeout) -+ seq_printf(s, ",handletimeout=%u", tcon->handle_timeout); - /* convert actimeo and display it in seconds */ - seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); - -diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h -index 1b25e6e95d45..6c934ab3722b 100644 ---- a/fs/cifs/cifsglob.h -+++ b/fs/cifs/cifsglob.h -@@ -59,6 +59,12 @@ - */ - #define CIFS_MAX_ACTIMEO (1 << 30) - -+/* -+ * Max persistent and resilient handle timeout (milliseconds). -+ * Windows durable max was 960000 (16 minutes) -+ */ -+#define SMB3_MAX_HANDLE_TIMEOUT 960000 -+ - /* - * MAX_REQ is the maximum number of requests that WE will send - * on one socket concurrently. 
-@@ -572,6 +578,7 @@ struct smb_vol { - struct nls_table *local_nls; - unsigned int echo_interval; /* echo interval in secs */ - __u64 snapshot_time; /* needed for timewarp tokens */ -+ __u32 handle_timeout; /* persistent and durable handle timeout in ms */ - unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */ - }; - -@@ -1028,6 +1035,7 @@ struct cifs_tcon { - __u32 vol_serial_number; - __le64 vol_create_time; - __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */ -+ __u32 handle_timeout; /* persistent and durable handle timeout in ms */ - __u32 ss_flags; /* sector size flags */ - __u32 perf_sector_size; /* best sector size for perf */ - __u32 max_chunks; -diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c -index 9d4e60123db4..44e6ec85f832 100644 ---- a/fs/cifs/connect.c -+++ b/fs/cifs/connect.c -@@ -103,7 +103,7 @@ enum { - Opt_cruid, Opt_gid, Opt_file_mode, - Opt_dirmode, Opt_port, - Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo, -- Opt_echo_interval, Opt_max_credits, -+ Opt_echo_interval, Opt_max_credits, Opt_handletimeout, - Opt_snapshot, - - /* Mount options which take string value */ -@@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = { - { Opt_rsize, "rsize=%s" }, - { Opt_wsize, "wsize=%s" }, - { Opt_actimeo, "actimeo=%s" }, -+ { Opt_handletimeout, "handletimeout=%s" }, - { Opt_echo_interval, "echo_interval=%s" }, - { Opt_max_credits, "max_credits=%s" }, - { Opt_snapshot, "snapshot=%s" }, -@@ -1600,6 +1601,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, - - vol->actimeo = CIFS_DEF_ACTIMEO; - -+ /* Most clients set timeout to 0, allows server to use its default */ -+ vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */ -+ - /* offer SMB2.1 and later (SMB3 etc). 
Secure and widely accepted */ - vol->ops = &smb30_operations; - vol->vals = &smbdefault_values; -@@ -1998,6 +2002,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, - goto cifs_parse_mount_err; - } - break; -+ case Opt_handletimeout: -+ if (get_option_ul(args, &option)) { -+ cifs_dbg(VFS, "%s: Invalid handletimeout value\n", -+ __func__); -+ goto cifs_parse_mount_err; -+ } -+ vol->handle_timeout = option; -+ if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) { -+ cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n"); -+ goto cifs_parse_mount_err; -+ } -+ break; - case Opt_echo_interval: - if (get_option_ul(args, &option)) { - cifs_dbg(VFS, "%s: Invalid echo interval value\n", -@@ -3164,6 +3180,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info) - return 0; - if (tcon->snapshot_time != volume_info->snapshot_time) - return 0; -+ if (tcon->handle_timeout != volume_info->handle_timeout) -+ return 0; - return 1; - } - -@@ -3278,6 +3296,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) - tcon->snapshot_time = volume_info->snapshot_time; - } - -+ if (volume_info->handle_timeout) { -+ if (ses->server->vals->protocol_id == 0) { -+ cifs_dbg(VFS, -+ "Use SMB2.1 or later for handle timeout option\n"); -+ rc = -EOPNOTSUPP; -+ goto out_fail; -+ } else -+ tcon->handle_timeout = volume_info->handle_timeout; -+ } -+ - tcon->ses = ses; - if (volume_info->password) { - tcon->password = kstrdup(volume_info->password, GFP_KERNEL); -diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c -index b204e84b87fb..b0e76d27d752 100644 ---- a/fs/cifs/smb2file.c -+++ b/fs/cifs/smb2file.c -@@ -68,7 +68,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, - - - if (oparms->tcon->use_resilient) { -- nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */ -+ /* default timeout is 0, servers pick default (120 seconds) */ -+ nr_ioctl_req.Timeout = -+ cpu_to_le32(oparms->tcon->handle_timeout); - nr_ioctl_req.Reserved = 0; - rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, - fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, -diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c -index 53642a237bf9..068febe37fe4 100644 ---- a/fs/cifs/smb2pdu.c -+++ b/fs/cifs/smb2pdu.c -@@ -1837,8 +1837,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, - } - - static struct create_durable_v2 * --create_durable_v2_buf(struct cifs_fid *pfid) -+create_durable_v2_buf(struct cifs_open_parms *oparms) - { -+ struct cifs_fid *pfid = oparms->fid; - struct create_durable_v2 *buf; - - buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); -@@ -1852,7 +1853,14 @@ create_durable_v2_buf(struct cifs_fid *pfid) - (struct create_durable_v2, Name)); - buf->ccontext.NameLength = cpu_to_le16(4); - -- buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ -+ /* -+ * NB: Handle timeout defaults to 0, which allows server to choose -+ * (most servers default to 120 seconds) and most clients default to 0. -+ * This can be overridden at mount ("handletimeout=") if the user wants -+ * a different persistent (or resilient) handle timeout for all opens -+ * opens on a particular SMB3 mount. 
-+ */ -+ buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); - buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); - generate_random_uuid(buf->dcontext.CreateGuid); - memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); -@@ -1905,7 +1913,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, - struct smb2_create_req *req = iov[0].iov_base; - unsigned int num = *num_iovec; - -- iov[num].iov_base = create_durable_v2_buf(oparms->fid); -+ iov[num].iov_base = create_durable_v2_buf(oparms); - if (iov[num].iov_base == NULL) - return -ENOMEM; - iov[num].iov_len = sizeof(struct create_durable_v2); -diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h -index 50fb0dee23e8..d35b8ec1c485 100644 ---- a/include/linux/bitrev.h -+++ b/include/linux/bitrev.h -@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x) - - #define __constant_bitrev32(x) \ - ({ \ -- u32 __x = x; \ -- __x = (__x >> 16) | (__x << 16); \ -- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \ -- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ -- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ -- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ -- __x; \ -+ u32 ___x = x; \ -+ ___x = (___x >> 16) | (___x << 16); \ -+ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \ -+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ -+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ -+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ -+ ___x; \ - }) - - #define __constant_bitrev16(x) \ - ({ \ -- u16 __x = x; \ -- __x = (__x >> 8) | (__x << 8); \ -- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \ -- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \ -- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \ -- __x; \ -+ u16 ___x = x; \ -+ ___x = (___x >> 8) | (___x << 8); \ -+ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \ -+ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \ -+ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \ -+ ___x; \ - }) - - #define __constant_bitrev8x4(x) \ - ({ \ -- u32 __x = x; \ -- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ -- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ -- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ -- __x; \ -+ u32 ___x = x; \ -+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ -+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ -+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ -+ ___x; \ - }) - - #define __constant_bitrev8(x) \ - ({ \ -- u8 __x = x; \ -- __x = (__x >> 4) | (__x << 4); \ -- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \ -- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \ -- __x; \ -+ u8 ___x = x; \ -+ ___x = (___x >> 4) | (___x << 4); \ -+ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \ -+ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \ -+ ___x; \ - }) - - #define bitrev32(x) \ -diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h -index 83ae11cbd12c..7391f5fe4eda 100644 ---- a/include/linux/memcontrol.h -+++ 
b/include/linux/memcontrol.h -@@ -561,7 +561,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page); - void __unlock_page_memcg(struct mem_cgroup *memcg); - void unlock_page_memcg(struct page *page); - --/* idx can be of type enum memcg_stat_item or node_stat_item */ -+/* -+ * idx can be of type enum memcg_stat_item or node_stat_item. -+ * Keep in sync with memcg_exact_page_state(). -+ */ - static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, - int idx) - { -diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h -index 54299251d40d..4f001619f854 100644 ---- a/include/linux/mlx5/driver.h -+++ b/include/linux/mlx5/driver.h -@@ -591,6 +591,8 @@ enum mlx5_pagefault_type_flags { - }; - - struct mlx5_td { -+ /* protects tirs list changes while tirs refresh */ -+ struct mutex list_lock; - struct list_head tirs_list; - u32 tdn; - }; -diff --git a/include/linux/string.h b/include/linux/string.h -index 7927b875f80c..6ab0a6fa512e 100644 ---- a/include/linux/string.h -+++ b/include/linux/string.h -@@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t); - #ifndef __HAVE_ARCH_MEMCMP - extern int memcmp(const void *,const void *,__kernel_size_t); - #endif -+#ifndef __HAVE_ARCH_BCMP -+extern int bcmp(const void *,const void *,__kernel_size_t); -+#endif - #ifndef __HAVE_ARCH_MEMCHR - extern void * memchr(const void *,int,__kernel_size_t); - #endif -diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h -index fab02133a919..3dc70adfe5f5 100644 ---- a/include/linux/virtio_ring.h -+++ b/include/linux/virtio_ring.h -@@ -63,7 +63,7 @@ struct virtqueue; - /* - * Creates a virtqueue and allocates the descriptor ring. If - * may_reduce_num is set, then this may allocate a smaller ring than -- * expected. The caller should query virtqueue_get_ring_size to learn -+ * expected. The caller should query virtqueue_get_vring_size to learn - * the actual size of the ring. 
- */ - struct virtqueue *vring_create_virtqueue(unsigned int index, -diff --git a/include/net/ip.h b/include/net/ip.h -index be3cad9c2e4c..583526aad1d0 100644 ---- a/include/net/ip.h -+++ b/include/net/ip.h -@@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, - unsigned char __user *data, int optlen); - void ip_options_undo(struct ip_options *opt); - void ip_forward_options(struct sk_buff *skb); --int ip_options_rcv_srr(struct sk_buff *skb); -+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev); - - /* - * Functions provided by ip_sockglue.c -diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h -index 99d4148e0f90..1c3126c14930 100644 ---- a/include/net/net_namespace.h -+++ b/include/net/net_namespace.h -@@ -58,6 +58,7 @@ struct net { - */ - spinlock_t rules_mod_lock; - -+ u32 hash_mix; - atomic64_t cookie_gen; - - struct list_head list; /* list of network namespaces */ -diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h -index 3d58acf94dd2..0612439909dc 100644 ---- a/include/net/netfilter/nf_tables.h -+++ b/include/net/netfilter/nf_tables.h -@@ -691,10 +691,12 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, - gcb->elems[gcb->head.cnt++] = elem; - } - -+struct nft_expr_ops; - /** - * struct nft_expr_type - nf_tables expression type - * - * @select_ops: function to select nft_expr_ops -+ * @release_ops: release nft_expr_ops - * @ops: default ops, used when no select_ops functions is present - * @list: used internally - * @name: Identifier -@@ -707,6 +709,7 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, - struct nft_expr_type { - const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *, - const struct nlattr * const tb[]); -+ void (*release_ops)(const struct nft_expr_ops *ops); - const struct nft_expr_ops *ops; - struct list_head list; - const char *name; -diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h -index 16a842456189..d9b665151f3d 100644 ---- a/include/net/netns/hash.h -+++ b/include/net/netns/hash.h -@@ -2,16 +2,10 @@ - #ifndef __NET_NS_HASH_H__ - #define __NET_NS_HASH_H__ - --#include -- --struct net; -+#include - - static inline u32 net_hash_mix(const struct net *net) - { --#ifdef CONFIG_NET_NS -- return (u32)(((unsigned long)net) >> ilog2(sizeof(*net))); --#else -- return 0; --#endif -+ return net->hash_mix; - } - #endif -diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c -index e960c4f46ee0..b07a2acc4eec 100644 ---- a/kernel/irq/chip.c -+++ b/kernel/irq/chip.c -@@ -1384,6 +1384,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) - int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) - { - data = data->parent_data; -+ -+ if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) -+ return 0; -+ - if (data->chip->irq_set_wake) - return data->chip->irq_set_wake(data, on); - -diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c -index 84fa255d0329..e16e022eae09 100644 ---- a/kernel/irq/irqdesc.c -+++ b/kernel/irq/irqdesc.c -@@ -558,6 +558,7 @@ int __init early_irq_init(void) - alloc_masks(&desc[i], node); - raw_spin_lock_init(&desc[i].lock); - lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); -+ mutex_init(&desc[i].request_mutex); - desc_set_defaults(i, &desc[i], node, NULL, NULL); - } - return arch_early_irq_init(); -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 310d0637fe4b..5e61a1a99e38 100644 ---- a/kernel/sched/fair.c -+++ 
b/kernel/sched/fair.c -@@ -7713,10 +7713,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) - if (cfs_rq->last_h_load_update == now) - return; - -- cfs_rq->h_load_next = NULL; -+ WRITE_ONCE(cfs_rq->h_load_next, NULL); - for_each_sched_entity(se) { - cfs_rq = cfs_rq_of(se); -- cfs_rq->h_load_next = se; -+ WRITE_ONCE(cfs_rq->h_load_next, se); - if (cfs_rq->last_h_load_update == now) - break; - } -@@ -7726,7 +7726,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) - cfs_rq->last_h_load_update = now; - } - -- while ((se = cfs_rq->h_load_next) != NULL) { -+ while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { - load = cfs_rq->h_load; - load = div64_ul(load * se->avg.load_avg, - cfs_rq_load_avg(cfs_rq) + 1); -diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index 2c97e8c2d29f..0519a8805aab 100644 ---- a/kernel/time/alarmtimer.c -+++ b/kernel/time/alarmtimer.c -@@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now) - { - struct alarm *alarm = &timr->it.alarm.alarmtimer; - -- return ktime_sub(now, alarm->node.expires); -+ return ktime_sub(alarm->node.expires, now); - } - - /** -diff --git a/lib/string.c b/lib/string.c -index 38e4ca08e757..3ab861c1a857 100644 ---- a/lib/string.c -+++ b/lib/string.c -@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count) - EXPORT_SYMBOL(memcmp); - #endif - -+#ifndef __HAVE_ARCH_BCMP -+/** -+ * bcmp - returns 0 if and only if the buffers have identical contents. -+ * @a: pointer to first buffer. -+ * @b: pointer to second buffer. -+ * @len: size of buffers. -+ * -+ * The sign or magnitude of a non-zero return value has no particular -+ * meaning, and architectures may implement their own more efficient bcmp(). So -+ * while this particular implementation is a simple (tail) call to memcmp, do -+ * not rely on anything but whether the return value is zero or non-zero. -+ */ -+#undef bcmp -+int bcmp(const void *a, const void *b, size_t len) -+{ -+ return memcmp(a, b, len); -+} -+EXPORT_SYMBOL(bcmp); -+#endif -+ - #ifndef __HAVE_ARCH_MEMSCAN - /** - * memscan - Find a character in an area of memory. 
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c -index faf357eaf0ce..8b03c698f86e 100644 ---- a/mm/huge_memory.c -+++ b/mm/huge_memory.c -@@ -753,6 +753,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, - spinlock_t *ptl; - - ptl = pmd_lock(mm, pmd); -+ if (!pmd_none(*pmd)) { -+ if (write) { -+ if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { -+ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); -+ goto out_unlock; -+ } -+ entry = pmd_mkyoung(*pmd); -+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); -+ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) -+ update_mmu_cache_pmd(vma, addr, pmd); -+ } -+ -+ goto out_unlock; -+ } -+ - entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); - if (pfn_t_devmap(pfn)) - entry = pmd_mkdevmap(entry); -@@ -764,11 +779,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, - if (pgtable) { - pgtable_trans_huge_deposit(mm, pmd, pgtable); - mm_inc_nr_ptes(mm); -+ pgtable = NULL; - } - - set_pmd_at(mm, addr, pmd, entry); - update_mmu_cache_pmd(vma, addr, pmd); -+ -+out_unlock: - spin_unlock(ptl); -+ if (pgtable) -+ pte_free(mm, pgtable); - } - - vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, -@@ -819,6 +839,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, - spinlock_t *ptl; - - ptl = pud_lock(mm, pud); -+ if (!pud_none(*pud)) { -+ if (write) { -+ if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { -+ WARN_ON_ONCE(!is_huge_zero_pud(*pud)); -+ goto out_unlock; -+ } -+ entry = pud_mkyoung(*pud); -+ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); -+ if (pudp_set_access_flags(vma, addr, pud, entry, 1)) -+ update_mmu_cache_pud(vma, addr, pud); -+ } -+ goto out_unlock; -+ } -+ - entry = pud_mkhuge(pfn_t_pud(pfn, prot)); - if (pfn_t_devmap(pfn)) - entry = pud_mkdevmap(entry); -@@ -828,6 +862,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, - } - set_pud_at(mm, addr, pud, entry); - update_mmu_cache_pud(vma, addr, pud); -+ -+out_unlock: - spin_unlock(ptl); - } - -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 79a7d2a06bba..5bbf2de02a0f 100644 ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) - return &memcg->cgwb_domain; - } - -+/* -+ * idx can be of type enum memcg_stat_item or node_stat_item. -+ * Keep in sync with memcg_exact_page(). 
-+ */ -+static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) -+{ -+ long x = atomic_long_read(&memcg->stat[idx]); -+ int cpu; -+ -+ for_each_online_cpu(cpu) -+ x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx]; -+ if (x < 0) -+ x = 0; -+ return x; -+} -+ - /** - * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg - * @wb: bdi_writeback in question -@@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, - struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); - struct mem_cgroup *parent; - -- *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); -+ *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); - - /* this should eventually include NR_UNSTABLE_NFS */ -- *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); -+ *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); - *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | - (1 << LRU_ACTIVE_FILE)); - *pheadroom = PAGE_COUNTER_MAX; -diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c -index ac92b2eb32b1..e4777614a8a0 100644 ---- a/net/bridge/br_multicast.c -+++ b/net/bridge/br_multicast.c -@@ -599,6 +599,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br, - if (ipv4_is_local_multicast(group)) - return 0; - -+ memset(&br_group, 0, sizeof(br_group)); - br_group.u.ip4 = group; - br_group.proto = htons(ETH_P_IP); - br_group.vid = vid; -@@ -1489,6 +1490,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br, - - own_query = port ? &port->ip4_own_query : &br->ip4_own_query; - -+ memset(&br_group, 0, sizeof(br_group)); - br_group.u.ip4 = group; - br_group.proto = htons(ETH_P_IP); - br_group.vid = vid; -@@ -1512,6 +1514,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, - - own_query = port ? 
&port->ip6_own_query : &br->ip6_own_query; - -+ memset(&br_group, 0, sizeof(br_group)); - br_group.u.ip6 = *group; - br_group.proto = htons(ETH_P_IPV6); - br_group.vid = vid; -diff --git a/net/core/dev.c b/net/core/dev.c -index 5d03889502eb..12824e007e06 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -5014,8 +5014,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head, - if (pt_prev->list_func != NULL) - pt_prev->list_func(head, pt_prev, orig_dev); - else -- list_for_each_entry_safe(skb, next, head, list) -+ list_for_each_entry_safe(skb, next, head, list) { -+ skb_list_del_init(skb); - pt_prev->func(skb, skb->dev, pt_prev, orig_dev); -+ } - } - - static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) -diff --git a/net/core/ethtool.c b/net/core/ethtool.c -index 158264f7cfaf..3a7f19a61768 100644 ---- a/net/core/ethtool.c -+++ b/net/core/ethtool.c -@@ -1794,11 +1794,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) - WARN_ON_ONCE(!ret); - - gstrings.len = ret; -- data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); -- if (gstrings.len && !data) -- return -ENOMEM; - -- __ethtool_get_strings(dev, gstrings.string_set, data); -+ if (gstrings.len) { -+ data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); -+ if (!data) -+ return -ENOMEM; -+ -+ __ethtool_get_strings(dev, gstrings.string_set, data); -+ } else { -+ data = NULL; -+ } - - ret = -EFAULT; - if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) -@@ -1894,11 +1899,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) - return -EFAULT; - - stats.n_stats = n_stats; -- data = vzalloc(array_size(n_stats, sizeof(u64))); -- if (n_stats && !data) -- return -ENOMEM; - -- ops->get_ethtool_stats(dev, &stats, data); -+ if (n_stats) { -+ data = vzalloc(array_size(n_stats, sizeof(u64))); -+ if (!data) -+ return -ENOMEM; -+ ops->get_ethtool_stats(dev, &stats, data); -+ } else { -+ data = NULL; -+ } - - ret = -EFAULT; - if (copy_to_user(useraddr, &stats, sizeof(stats))) -@@ -1938,16 +1947,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) - return -EFAULT; - - stats.n_stats = n_stats; -- data = vzalloc(array_size(n_stats, sizeof(u64))); -- if (n_stats && !data) -- return -ENOMEM; - -- if (dev->phydev && !ops->get_ethtool_phy_stats) { -- ret = phy_ethtool_get_stats(dev->phydev, &stats, data); -- if (ret < 0) -- return ret; -+ if (n_stats) { -+ data = vzalloc(array_size(n_stats, sizeof(u64))); -+ if (!data) -+ return -ENOMEM; -+ -+ if (dev->phydev && !ops->get_ethtool_phy_stats) { -+ ret = phy_ethtool_get_stats(dev->phydev, &stats, data); -+ if (ret < 0) -+ goto out; -+ } else { -+ ops->get_ethtool_phy_stats(dev, &stats, data); -+ } - } else { -- ops->get_ethtool_phy_stats(dev, &stats, data); -+ data = NULL; - } - - ret = -EFAULT; -diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c -index b02fb19df2cc..40c249c574c1 100644 ---- a/net/core/net_namespace.c -+++ b/net/core/net_namespace.c -@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) - - refcount_set(&net->count, 1); - refcount_set(&net->passive, 1); -+ get_random_bytes(&net->hash_mix, sizeof(u32)); - net->dev_base_seq = 1; - net->user_ns = user_ns; - idr_init(&net->netns_ids); -diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index 2415d9cb9b89..ef2cd5712098 100644 ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -3801,7 +3801,7 @@ int skb_gro_receive(struct 
sk_buff *p, struct sk_buff *skb) - unsigned int delta_truesize; - struct sk_buff *lp; - -- if (unlikely(p->len + len >= 65536)) -+ if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) - return -E2BIG; - - lp = NAPI_GRO_CB(p)->last; -diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c -index 6ae89f2b541b..2d5734079e6b 100644 ---- a/net/ipv4/ip_gre.c -+++ b/net/ipv4/ip_gre.c -@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, - struct net *net = dev_net(skb->dev); - struct metadata_dst *tun_dst = NULL; - struct erspan_base_hdr *ershdr; -- struct erspan_metadata *pkt_md; - struct ip_tunnel_net *itn; - struct ip_tunnel *tunnel; - const struct iphdr *iph; -@@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, - if (unlikely(!pskb_may_pull(skb, len))) - return PACKET_REJECT; - -- ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); -- pkt_md = (struct erspan_metadata *)(ershdr + 1); -- - if (__iptunnel_pull_header(skb, - len, - htons(ETH_P_TEB), -@@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, - goto drop; - - if (tunnel->collect_md) { -+ struct erspan_metadata *pkt_md, *md; - struct ip_tunnel_info *info; -- struct erspan_metadata *md; -+ unsigned char *gh; - __be64 tun_id; - __be16 flags; - -@@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, - if (!tun_dst) - return PACKET_REJECT; - -+ /* skb can be uncloned in __iptunnel_pull_header, so -+ * old pkt_md is no longer valid and we need to reset -+ * it -+ */ -+ gh = skb_network_header(skb) + -+ skb_network_header_len(skb); -+ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + -+ sizeof(*ershdr)); - md = ip_tunnel_info_opts(&tun_dst->u.tun_info); - md->version = ver; - md2 = &md->u.md2; -diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c -index 1f4737b77067..ccf0d31b6ce5 100644 ---- a/net/ipv4/ip_input.c -+++ b/net/ipv4/ip_input.c -@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb) - ip_local_deliver_finish); - } - --static inline bool ip_rcv_options(struct sk_buff *skb) -+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev) - { - struct ip_options *opt; - const struct iphdr *iph; -- struct net_device *dev = skb->dev; - - /* It looks as overkill, because not all - IP options require packet mangling. 
-@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb) - } - } - -- if (ip_options_rcv_srr(skb)) -+ if (ip_options_rcv_srr(skb, dev)) - goto drop; - } - -@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, - } - #endif - -- if (iph->ihl > 5 && ip_rcv_options(skb)) -+ if (iph->ihl > 5 && ip_rcv_options(skb, dev)) - goto drop; - - rt = skb_rtable(skb); -diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c -index 32a35043c9f5..3db31bb9df50 100644 ---- a/net/ipv4/ip_options.c -+++ b/net/ipv4/ip_options.c -@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb) - } - } - --int ip_options_rcv_srr(struct sk_buff *skb) -+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) - { - struct ip_options *opt = &(IPCB(skb)->opt); - int srrspace, srrptr; -@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) - - orefdst = skb->_skb_refdst; - skb_dst_set(skb, NULL); -- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); -+ err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev); - rt2 = skb_rtable(skb); - if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { - skb_dst_drop(skb); -diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c -index cd4814f7e962..359da68d7c06 100644 ---- a/net/ipv4/tcp_dctcp.c -+++ b/net/ipv4/tcp_dctcp.c -@@ -67,11 +67,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA; - module_param(dctcp_alpha_on_init, uint, 0644); - MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); - --static unsigned int dctcp_clamp_alpha_on_loss __read_mostly; --module_param(dctcp_clamp_alpha_on_loss, uint, 0644); --MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss, -- "parameter for clamping alpha on loss"); -- - static struct tcp_congestion_ops dctcp_reno; - - static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) -@@ -164,21 +159,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags) - } - } - --static void dctcp_state(struct sock *sk, u8 new_state) -+static void dctcp_react_to_loss(struct sock *sk) - { -- if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) { -- struct dctcp *ca = inet_csk_ca(sk); -+ struct dctcp *ca = inet_csk_ca(sk); -+ struct tcp_sock *tp = tcp_sk(sk); - -- /* If this extension is enabled, we clamp dctcp_alpha to -- * max on packet loss; the motivation is that dctcp_alpha -- * is an indicator to the extend of congestion and packet -- * loss is an indicator of extreme congestion; setting -- * this in practice turned out to be beneficial, and -- * effectively assumes total congestion which reduces the -- * window by half. -- */ -- ca->dctcp_alpha = DCTCP_MAX_ALPHA; -- } -+ ca->loss_cwnd = tp->snd_cwnd; -+ tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U); -+} -+ -+static void dctcp_state(struct sock *sk, u8 new_state) -+{ -+ if (new_state == TCP_CA_Recovery && -+ new_state != inet_csk(sk)->icsk_ca_state) -+ dctcp_react_to_loss(sk); -+ /* We handle RTO in dctcp_cwnd_event to ensure that we perform only -+ * one loss-adjustment per RTT. -+ */ - } - - static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) -@@ -190,6 +187,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) - case CA_EVENT_ECN_NO_CE: - dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); - break; -+ case CA_EVENT_LOSS: -+ dctcp_react_to_loss(sk); -+ break; - default: - /* Don't care for the rest. 
*/ - break; -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index 1aae9ab57fe9..00852f47a73d 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -2578,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net) - { - int cpu; - -- module_put(net->ipv4.tcp_congestion_control->owner); -+ if (net->ipv4.tcp_congestion_control) -+ module_put(net->ipv4.tcp_congestion_control->owner); - - for_each_possible_cpu(cpu) - inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); -diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c -index 26f25b6e2833..438f1a5fd19a 100644 ---- a/net/ipv6/ip6_gre.c -+++ b/net/ipv6/ip6_gre.c -@@ -524,11 +524,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) - return PACKET_REJECT; - } - --static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len, -- struct tnl_ptk_info *tpi) -+static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, -+ int gre_hdr_len) - { - struct erspan_base_hdr *ershdr; -- struct erspan_metadata *pkt_md; - const struct ipv6hdr *ipv6h; - struct erspan_md2 *md2; - struct ip6_tnl *tunnel; -@@ -547,18 +546,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len, - if (unlikely(!pskb_may_pull(skb, len))) - return PACKET_REJECT; - -- ershdr = (struct erspan_base_hdr *)skb->data; -- pkt_md = (struct erspan_metadata *)(ershdr + 1); -- - if (__iptunnel_pull_header(skb, len, - htons(ETH_P_TEB), - false, false) < 0) - return PACKET_REJECT; - - if (tunnel->parms.collect_md) { -+ struct erspan_metadata *pkt_md, *md; - struct metadata_dst *tun_dst; - struct ip_tunnel_info *info; -- struct erspan_metadata *md; -+ unsigned char *gh; - __be64 tun_id; - __be16 flags; - -@@ -571,6 +568,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len, - if (!tun_dst) - return PACKET_REJECT; - -+ /* skb can be uncloned in __iptunnel_pull_header, so -+ * old pkt_md is no longer valid and we need to reset -+ * it -+ */ -+ gh = skb_network_header(skb) + -+ skb_network_header_len(skb); -+ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len + -+ sizeof(*ershdr)); - info = &tun_dst->u.tun_info; - md = ip_tunnel_info_opts(info); - md->version = ver; -@@ -607,7 +612,7 @@ static int gre_rcv(struct sk_buff *skb) - - if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) || - tpi.proto == htons(ETH_P_ERSPAN2))) { -- if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD) -+ if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) - return 0; - goto out; - } -diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c -index 5f9fa0302b5a..e71227390bec 100644 ---- a/net/ipv6/ip6_output.c -+++ b/net/ipv6/ip6_output.c -@@ -595,7 +595,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, - inet6_sk(skb->sk) : NULL; - struct ipv6hdr *tmp_hdr; - struct frag_hdr *fh; -- unsigned int mtu, hlen, left, len; -+ unsigned int mtu, hlen, left, len, nexthdr_offset; - int hroom, troom; - __be32 frag_id; - int ptr, offset = 0, err = 0; -@@ -606,6 +606,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, - goto fail; - hlen = err; - nexthdr = *prevhdr; -+ nexthdr_offset = prevhdr - skb_network_header(skb); - - mtu = ip6_skb_dst_mtu(skb); - -@@ -640,6 +641,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, - (err = skb_checksum_help(skb))) - goto fail; - -+ prevhdr = skb_network_header(skb) + nexthdr_offset; - hroom = LL_RESERVED_SPACE(rt->dst.dev); - if (skb_has_frag_list(skb)) { - unsigned int first_len = skb_pagelen(skb); -diff --git 
a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c -index 0c6403cf8b52..ade1390c6348 100644 ---- a/net/ipv6/ip6_tunnel.c -+++ b/net/ipv6/ip6_tunnel.c -@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, - eiph->daddr, eiph->saddr, 0, 0, - IPPROTO_IPIP, RT_TOS(eiph->tos), 0); -- if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) { -+ if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) { - if (!IS_ERR(rt)) - ip_rt_put(rt); - goto out; -@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - } else { - if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, - skb2->dev) || -- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL) -+ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6) - goto out; - } - -diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c -index 07e21a82ce4c..b2109b74857d 100644 ---- a/net/ipv6/sit.c -+++ b/net/ipv6/sit.c -@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb) - !net_eq(tunnel->net, dev_net(tunnel->dev)))) - goto out; - -+ /* skb can be uncloned in iptunnel_pull_header, so -+ * old iph is no longer valid -+ */ -+ iph = (const struct iphdr *)skb_mac_header(skb); - err = IP_ECN_decapsulate(iph, skb); - if (unlikely(err)) { - if (log_ecn_error) -diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c -index 571d824e4e24..b919db02c7f9 100644 ---- a/net/kcm/kcmsock.c -+++ b/net/kcm/kcmsock.c -@@ -2054,14 +2054,14 @@ static int __init kcm_init(void) - if (err) - goto fail; - -- err = sock_register(&kcm_family_ops); -- if (err) -- goto sock_register_fail; -- - err = register_pernet_device(&kcm_net_ops); - if (err) - goto net_ops_fail; - -+ err = sock_register(&kcm_family_ops); -+ if (err) -+ goto sock_register_fail; -+ - err = kcm_proc_init(); - if (err) - goto proc_init_fail; -@@ -2069,12 +2069,12 @@ static int __init kcm_init(void) - return 0; - - proc_init_fail: -- unregister_pernet_device(&kcm_net_ops); -- --net_ops_fail: - sock_unregister(PF_KCM); - - sock_register_fail: -+ unregister_pernet_device(&kcm_net_ops); -+ -+net_ops_fail: - proto_unregister(&kcm_proto); - - fail: -@@ -2090,8 +2090,8 @@ fail: - static void __exit kcm_exit(void) - { - kcm_proc_exit(); -- unregister_pernet_device(&kcm_net_ops); - sock_unregister(PF_KCM); -+ unregister_pernet_device(&kcm_net_ops); - proto_unregister(&kcm_proto); - destroy_workqueue(kcm_wq); - -diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c -index e1724f9d8b9d..acb124ce92ec 100644 ---- a/net/netfilter/nf_tables_api.c -+++ b/net/netfilter/nf_tables_api.c -@@ -2119,9 +2119,11 @@ err1: - static void nf_tables_expr_destroy(const struct nft_ctx *ctx, - struct nft_expr *expr) - { -+ const struct nft_expr_type *type = expr->ops->type; -+ - if (expr->ops->destroy) - expr->ops->destroy(ctx, expr); -- module_put(expr->ops->type->owner); -+ module_put(type->owner); - } - - struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, -@@ -2129,6 +2131,7 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, - { - struct nft_expr_info info; - struct nft_expr *expr; -+ struct module *owner; - int err; - - err = nf_tables_expr_parse(ctx, nla, &info); -@@ -2148,7 +2151,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, - err3: - kfree(expr); - err2: -- module_put(info.ops->type->owner); -+ owner = info.ops->type->owner; -+ if (info.ops->type->release_ops) -+ info.ops->type->release_ops(info.ops); -+ -+ module_put(owner); - err1: - return ERR_PTR(err); - } -@@ -2746,8 +2753,11 @@ err2: - 
nf_tables_rule_release(&ctx, rule); - err1: - for (i = 0; i < n; i++) { -- if (info[i].ops != NULL) -+ if (info[i].ops) { - module_put(info[i].ops->type->owner); -+ if (info[i].ops->type->release_ops) -+ info[i].ops->type->release_ops(info[i].ops); -+ } - } - kvfree(info); - return err; -diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c -index 0a4bad55a8aa..469f9da5073b 100644 ---- a/net/netfilter/nft_compat.c -+++ b/net/netfilter/nft_compat.c -@@ -22,23 +22,6 @@ - #include - #include - #include --#include -- --struct nft_xt { -- struct list_head head; -- struct nft_expr_ops ops; -- refcount_t refcnt; -- -- /* used only when transaction mutex is locked */ -- unsigned int listcnt; -- -- /* Unlike other expressions, ops doesn't have static storage duration. -- * nft core assumes they do. We use kfree_rcu so that nft core can -- * can check expr->ops->size even after nft_compat->destroy() frees -- * the nft_xt struct that holds the ops structure. -- */ -- struct rcu_head rcu_head; --}; - - /* Used for matches where *info is larger than X byte */ - #define NFT_MATCH_LARGE_THRESH 192 -@@ -47,46 +30,6 @@ struct nft_xt_match_priv { - void *info; - }; - --struct nft_compat_net { -- struct list_head nft_target_list; -- struct list_head nft_match_list; --}; -- --static unsigned int nft_compat_net_id __read_mostly; --static struct nft_expr_type nft_match_type; --static struct nft_expr_type nft_target_type; -- --static struct nft_compat_net *nft_compat_pernet(struct net *net) --{ -- return net_generic(net, nft_compat_net_id); --} -- --static void nft_xt_get(struct nft_xt *xt) --{ -- /* refcount_inc() warns on 0 -> 1 transition, but we can't -- * init the reference count to 1 in .select_ops -- we can't -- * undo such an increase when another expression inside the same -- * rule fails afterwards. 
-- */ -- if (xt->listcnt == 0) -- refcount_set(&xt->refcnt, 1); -- else -- refcount_inc(&xt->refcnt); -- -- xt->listcnt++; --} -- --static bool nft_xt_put(struct nft_xt *xt) --{ -- if (refcount_dec_and_test(&xt->refcnt)) { -- WARN_ON_ONCE(!list_empty(&xt->head)); -- kfree_rcu(xt, rcu_head); -- return true; -- } -- -- return false; --} -- - static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx, - const char *tablename) - { -@@ -281,7 +224,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, - struct xt_target *target = expr->ops->data; - struct xt_tgchk_param par; - size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); -- struct nft_xt *nft_xt; - u16 proto = 0; - bool inv = false; - union nft_entry e = {}; -@@ -305,8 +247,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, - if (!target->target) - return -EINVAL; - -- nft_xt = container_of(expr->ops, struct nft_xt, ops); -- nft_xt_get(nft_xt); - return 0; - } - -@@ -325,8 +265,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) - if (par.target->destroy != NULL) - par.target->destroy(&par); - -- if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) -- module_put(me); -+ module_put(me); -+ kfree(expr->ops); - } - - static int nft_extension_dump_info(struct sk_buff *skb, int attr, -@@ -499,7 +439,6 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, - struct xt_match *match = expr->ops->data; - struct xt_mtchk_param par; - size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); -- struct nft_xt *nft_xt; - u16 proto = 0; - bool inv = false; - union nft_entry e = {}; -@@ -515,13 +454,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, - - nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); - -- ret = xt_check_match(&par, size, proto, inv); -- if (ret < 0) -- return ret; -- -- nft_xt = container_of(expr->ops, struct nft_xt, ops); -- nft_xt_get(nft_xt); -- return 0; -+ return xt_check_match(&par, size, proto, inv); - } - - static int -@@ -564,8 +497,8 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr, - if (par.match->destroy != NULL) - par.match->destroy(&par); - -- if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) -- module_put(me); -+ module_put(me); -+ kfree(expr->ops); - } - - static void -@@ -574,18 +507,6 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) - __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); - } - --static void nft_compat_deactivate(const struct nft_ctx *ctx, -- const struct nft_expr *expr, -- enum nft_trans_phase phase) --{ -- struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); -- -- if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) { -- if (--xt->listcnt == 0) -- list_del_init(&xt->head); -- } --} -- - static void - nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) - { -@@ -780,19 +701,13 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = { - .cb = nfnl_nft_compat_cb, - }; - --static bool nft_match_cmp(const struct xt_match *match, -- const char *name, u32 rev, u32 family) --{ -- return strcmp(match->name, name) == 0 && match->revision == rev && -- (match->family == NFPROTO_UNSPEC || match->family == family); --} -+static struct nft_expr_type nft_match_type; - - static const struct nft_expr_ops * - nft_match_select_ops(const struct nft_ctx *ctx, - const struct nlattr * const tb[]) - { -- struct nft_compat_net *cn; -- struct 
nft_xt *nft_match; -+ struct nft_expr_ops *ops; - struct xt_match *match; - unsigned int matchsize; - char *mt_name; -@@ -808,16 +723,6 @@ nft_match_select_ops(const struct nft_ctx *ctx, - rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); - family = ctx->family; - -- cn = nft_compat_pernet(ctx->net); -- -- /* Re-use the existing match if it's already loaded. */ -- list_for_each_entry(nft_match, &cn->nft_match_list, head) { -- struct xt_match *match = nft_match->ops.data; -- -- if (nft_match_cmp(match, mt_name, rev, family)) -- return &nft_match->ops; -- } -- - match = xt_request_find_match(family, mt_name, rev); - if (IS_ERR(match)) - return ERR_PTR(-ENOENT); -@@ -827,65 +732,62 @@ nft_match_select_ops(const struct nft_ctx *ctx, - goto err; - } - -- /* This is the first time we use this match, allocate operations */ -- nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL); -- if (nft_match == NULL) { -+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL); -+ if (!ops) { - err = -ENOMEM; - goto err; - } - -- refcount_set(&nft_match->refcnt, 0); -- nft_match->ops.type = &nft_match_type; -- nft_match->ops.eval = nft_match_eval; -- nft_match->ops.init = nft_match_init; -- nft_match->ops.destroy = nft_match_destroy; -- nft_match->ops.deactivate = nft_compat_deactivate; -- nft_match->ops.dump = nft_match_dump; -- nft_match->ops.validate = nft_match_validate; -- nft_match->ops.data = match; -+ ops->type = &nft_match_type; -+ ops->eval = nft_match_eval; -+ ops->init = nft_match_init; -+ ops->destroy = nft_match_destroy; -+ ops->dump = nft_match_dump; -+ ops->validate = nft_match_validate; -+ ops->data = match; - - matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize)); - if (matchsize > NFT_MATCH_LARGE_THRESH) { - matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv)); - -- nft_match->ops.eval = nft_match_large_eval; -- nft_match->ops.init = nft_match_large_init; -- nft_match->ops.destroy = nft_match_large_destroy; -- nft_match->ops.dump = nft_match_large_dump; -+ ops->eval = nft_match_large_eval; -+ ops->init = nft_match_large_init; -+ ops->destroy = nft_match_large_destroy; -+ ops->dump = nft_match_large_dump; - } - -- nft_match->ops.size = matchsize; -+ ops->size = matchsize; - -- nft_match->listcnt = 0; -- list_add(&nft_match->head, &cn->nft_match_list); -- -- return &nft_match->ops; -+ return ops; - err: - module_put(match->me); - return ERR_PTR(err); - } - -+static void nft_match_release_ops(const struct nft_expr_ops *ops) -+{ -+ struct xt_match *match = ops->data; -+ -+ module_put(match->me); -+ kfree(ops); -+} -+ - static struct nft_expr_type nft_match_type __read_mostly = { - .name = "match", - .select_ops = nft_match_select_ops, -+ .release_ops = nft_match_release_ops, - .policy = nft_match_policy, - .maxattr = NFTA_MATCH_MAX, - .owner = THIS_MODULE, - }; - --static bool nft_target_cmp(const struct xt_target *tg, -- const char *name, u32 rev, u32 family) --{ -- return strcmp(tg->name, name) == 0 && tg->revision == rev && -- (tg->family == NFPROTO_UNSPEC || tg->family == family); --} -+static struct nft_expr_type nft_target_type; - - static const struct nft_expr_ops * - nft_target_select_ops(const struct nft_ctx *ctx, - const struct nlattr * const tb[]) - { -- struct nft_compat_net *cn; -- struct nft_xt *nft_target; -+ struct nft_expr_ops *ops; - struct xt_target *target; - char *tg_name; - u32 rev, family; -@@ -905,18 +807,6 @@ nft_target_select_ops(const struct nft_ctx *ctx, - strcmp(tg_name, "standard") == 0) - return ERR_PTR(-EINVAL); - -- cn = nft_compat_pernet(ctx->net); -- /* 
Re-use the existing target if it's already loaded. */ -- list_for_each_entry(nft_target, &cn->nft_target_list, head) { -- struct xt_target *target = nft_target->ops.data; -- -- if (!target->target) -- continue; -- -- if (nft_target_cmp(target, tg_name, rev, family)) -- return &nft_target->ops; -- } -- - target = xt_request_find_target(family, tg_name, rev); - if (IS_ERR(target)) - return ERR_PTR(-ENOENT); -@@ -931,113 +821,55 @@ nft_target_select_ops(const struct nft_ctx *ctx, - goto err; - } - -- /* This is the first time we use this target, allocate operations */ -- nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL); -- if (nft_target == NULL) { -+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL); -+ if (!ops) { - err = -ENOMEM; - goto err; - } - -- refcount_set(&nft_target->refcnt, 0); -- nft_target->ops.type = &nft_target_type; -- nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); -- nft_target->ops.init = nft_target_init; -- nft_target->ops.destroy = nft_target_destroy; -- nft_target->ops.deactivate = nft_compat_deactivate; -- nft_target->ops.dump = nft_target_dump; -- nft_target->ops.validate = nft_target_validate; -- nft_target->ops.data = target; -+ ops->type = &nft_target_type; -+ ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); -+ ops->init = nft_target_init; -+ ops->destroy = nft_target_destroy; -+ ops->dump = nft_target_dump; -+ ops->validate = nft_target_validate; -+ ops->data = target; - - if (family == NFPROTO_BRIDGE) -- nft_target->ops.eval = nft_target_eval_bridge; -+ ops->eval = nft_target_eval_bridge; - else -- nft_target->ops.eval = nft_target_eval_xt; -- -- nft_target->listcnt = 0; -- list_add(&nft_target->head, &cn->nft_target_list); -+ ops->eval = nft_target_eval_xt; - -- return &nft_target->ops; -+ return ops; - err: - module_put(target->me); - return ERR_PTR(err); - } - -+static void nft_target_release_ops(const struct nft_expr_ops *ops) -+{ -+ struct xt_target *target = ops->data; -+ -+ module_put(target->me); -+ kfree(ops); -+} -+ - static struct nft_expr_type nft_target_type __read_mostly = { - .name = "target", - .select_ops = nft_target_select_ops, -+ .release_ops = nft_target_release_ops, - .policy = nft_target_policy, - .maxattr = NFTA_TARGET_MAX, - .owner = THIS_MODULE, - }; - --static int __net_init nft_compat_init_net(struct net *net) --{ -- struct nft_compat_net *cn = nft_compat_pernet(net); -- -- INIT_LIST_HEAD(&cn->nft_target_list); -- INIT_LIST_HEAD(&cn->nft_match_list); -- -- return 0; --} -- --static void __net_exit nft_compat_exit_net(struct net *net) --{ -- struct nft_compat_net *cn = nft_compat_pernet(net); -- struct nft_xt *xt, *next; -- -- if (list_empty(&cn->nft_match_list) && -- list_empty(&cn->nft_target_list)) -- return; -- -- /* If there was an error that caused nft_xt expr to not be initialized -- * fully and noone else requested the same expression later, the lists -- * contain 0-refcount entries that still hold module reference. -- * -- * Clean them here. 
-- */ -- mutex_lock(&net->nft.commit_mutex); -- list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) { -- struct xt_target *target = xt->ops.data; -- -- list_del_init(&xt->head); -- -- if (refcount_read(&xt->refcnt)) -- continue; -- module_put(target->me); -- kfree(xt); -- } -- -- list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) { -- struct xt_match *match = xt->ops.data; -- -- list_del_init(&xt->head); -- -- if (refcount_read(&xt->refcnt)) -- continue; -- module_put(match->me); -- kfree(xt); -- } -- mutex_unlock(&net->nft.commit_mutex); --} -- --static struct pernet_operations nft_compat_net_ops = { -- .init = nft_compat_init_net, -- .exit = nft_compat_exit_net, -- .id = &nft_compat_net_id, -- .size = sizeof(struct nft_compat_net), --}; -- - static int __init nft_compat_module_init(void) - { - int ret; - -- ret = register_pernet_subsys(&nft_compat_net_ops); -- if (ret < 0) -- goto err_target; -- - ret = nft_register_expr(&nft_match_type); - if (ret < 0) -- goto err_pernet; -+ return ret; - - ret = nft_register_expr(&nft_target_type); - if (ret < 0) -@@ -1054,8 +886,6 @@ err_target: - nft_unregister_expr(&nft_target_type); - err_match: - nft_unregister_expr(&nft_match_type); --err_pernet: -- unregister_pernet_subsys(&nft_compat_net_ops); - return ret; - } - -@@ -1064,7 +894,6 @@ static void __exit nft_compat_module_exit(void) - nfnetlink_subsys_unregister(&nfnl_compat_subsys); - nft_unregister_expr(&nft_target_type); - nft_unregister_expr(&nft_match_type); -- unregister_pernet_subsys(&nft_compat_net_ops); - } - - MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); -diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c -index 691da853bef5..4bdf5e3ac208 100644 ---- a/net/openvswitch/flow_netlink.c -+++ b/net/openvswitch/flow_netlink.c -@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, - - struct sw_flow_actions *acts; - int new_acts_size; -- int req_size = NLA_ALIGN(attr_len); -+ size_t req_size = NLA_ALIGN(attr_len); - int next_offset = offsetof(struct sw_flow_actions, actions) + - (*sfa)->actions_len; - - if (req_size <= (ksize(*sfa) - next_offset)) - goto out; - -- new_acts_size = ksize(*sfa) * 2; -+ new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2); - - if (new_acts_size > MAX_ACTIONS_BUFSIZE) { - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { -diff --git a/net/rds/tcp.c b/net/rds/tcp.c -index c16f0a362c32..a729c47db781 100644 ---- a/net/rds/tcp.c -+++ b/net/rds/tcp.c -@@ -600,7 +600,7 @@ static void rds_tcp_kill_sock(struct net *net) - list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { - struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); - -- if (net != c_net || !tc->t_sock) -+ if (net != c_net) - continue; - if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { - list_move_tail(&tc->t_tcp_node, &tmp_list); -diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c -index 1a0c682fd734..fd62fe6c8e73 100644 ---- a/net/sched/act_sample.c -+++ b/net/sched/act_sample.c -@@ -43,8 +43,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, - struct tc_action_net *tn = net_generic(net, sample_net_id); - struct nlattr *tb[TCA_SAMPLE_MAX + 1]; - struct psample_group *psample_group; -+ u32 psample_group_num, rate; - struct tc_sample *parm; -- u32 psample_group_num; - struct tcf_sample *s; - bool exists = false; - int ret, err; -@@ -80,6 +80,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, - return -EEXIST; - } - -+ rate = 
nla_get_u32(tb[TCA_SAMPLE_RATE]); -+ if (!rate) { -+ NL_SET_ERR_MSG(extack, "invalid sample rate"); -+ tcf_idr_release(*a, bind); -+ return -EINVAL; -+ } - psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); - psample_group = psample_group_get(net, psample_group_num); - if (!psample_group) { -@@ -91,7 +97,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, - - spin_lock_bh(&s->tcf_lock); - s->tcf_action = parm->action; -- s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); -+ s->rate = rate; - s->psample_group_num = psample_group_num; - RCU_INIT_POINTER(s->psample_group, psample_group); - -diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c -index 0e408ee9dcec..5ba07cd11e31 100644 ---- a/net/sched/cls_matchall.c -+++ b/net/sched/cls_matchall.c -@@ -125,6 +125,11 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) - - static void *mall_get(struct tcf_proto *tp, u32 handle) - { -+ struct cls_mall_head *head = rtnl_dereference(tp->root); -+ -+ if (head && head->handle == handle) -+ return head; -+ - return NULL; - } - -diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c -index 6abc8b274270..951afdeea5e9 100644 ---- a/net/sctp/protocol.c -+++ b/net/sctp/protocol.c -@@ -600,6 +600,7 @@ out: - static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) - { - /* No address mapping for V4 sockets */ -+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); - return sizeof(struct sockaddr_in); - } - -diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c -index 21113bfd4eca..a5ae9c036b9c 100644 ---- a/net/sunrpc/xprtrdma/verbs.c -+++ b/net/sunrpc/xprtrdma/verbs.c -@@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt) - /* Flush Receives, then wait for deferred Reply work - * to complete. 
- */ -- ib_drain_qp(ia->ri_id->qp); -+ ib_drain_rq(ia->ri_id->qp); - drain_workqueue(buf->rb_completion_wq); - - /* Deferred Reply processing might have scheduled -diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c -index 7d4640d1fe9f..38e7deab6384 100644 ---- a/sound/core/seq/seq_clientmgr.c -+++ b/sound/core/seq/seq_clientmgr.c -@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client, - - /* fill the info fields */ - if (client_info->name[0]) -- strlcpy(client->name, client_info->name, sizeof(client->name)); -+ strscpy(client->name, client_info->name, sizeof(client->name)); - - client->filter = client_info->filter; - client->event_lost = client_info->event_lost; -@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg) - /* set queue name */ - if (!info->name[0]) - snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue); -- strlcpy(q->name, info->name, sizeof(q->name)); -+ strscpy(q->name, info->name, sizeof(q->name)); - snd_use_lock_free(&q->use_lock); - - return 0; -@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client, - queuefree(q); - return -EPERM; - } -- strlcpy(q->name, info->name, sizeof(q->name)); -+ strscpy(q->name, info->name, sizeof(q->name)); - queuefree(q); - - return 0; -diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index ece256a3b48f..2ec91085fa3e 100644 ---- a/sound/pci/hda/hda_intel.c -+++ b/sound/pci/hda/hda_intel.c -@@ -2142,6 +2142,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { - SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0), - /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */ - SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0), -+ /* https://bugs.launchpad.net/bugs/1821663 */ -+ SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0), - /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */ - SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0), - /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ -@@ -2150,6 +2152,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { - SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0), - /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */ - SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0), -+ /* https://bugs.launchpad.net/bugs/1821663 */ -+ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0), - {} - }; - #endif /* CONFIG_PM */ -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c -index 00c27b3b8c14..84fae0df59e9 100644 ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -1864,8 +1864,8 @@ enum { - ALC887_FIXUP_BASS_CHMAP, - ALC1220_FIXUP_GB_DUAL_CODECS, - ALC1220_FIXUP_CLEVO_P950, -- ALC1220_FIXUP_SYSTEM76_ORYP5, -- ALC1220_FIXUP_SYSTEM76_ORYP5_PINS, -+ ALC1220_FIXUP_CLEVO_PB51ED, -+ ALC1220_FIXUP_CLEVO_PB51ED_PINS, - }; - - static void alc889_fixup_coef(struct hda_codec *codec, -@@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, - static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, - const struct hda_fixup *fix, int action); - --static void alc1220_fixup_system76_oryp5(struct hda_codec *codec, -+static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec, - const struct hda_fixup *fix, - int action) - { -@@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = { - .type = HDA_FIXUP_FUNC, - .v.func = alc1220_fixup_clevo_p950, - }, -- [ALC1220_FIXUP_SYSTEM76_ORYP5] 
= { -+ [ALC1220_FIXUP_CLEVO_PB51ED] = { - .type = HDA_FIXUP_FUNC, -- .v.func = alc1220_fixup_system76_oryp5, -+ .v.func = alc1220_fixup_clevo_pb51ed, - }, -- [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = { -+ [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = { - .type = HDA_FIXUP_PINS, - .v.pins = (const struct hda_pintbl[]) { - { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ - {} - }, - .chained = true, -- .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5, -+ .chain_id = ALC1220_FIXUP_CLEVO_PB51ED, - }, - }; - -@@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { - SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), - SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), - SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), -- SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), -- SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), -+ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), -+ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), -+ SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS), - SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), - SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), - SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), -@@ -5661,6 +5662,7 @@ enum { - ALC233_FIXUP_ASUS_MIC_NO_PRESENCE, - ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, - ALC233_FIXUP_LENOVO_MULTI_CODECS, -+ ALC233_FIXUP_ACER_HEADSET_MIC, - ALC294_FIXUP_LENOVO_MIC_LOCATION, - ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, - ALC700_FIXUP_INTEL_REFERENCE, -@@ -6488,6 +6490,16 @@ static const struct hda_fixup alc269_fixups[] = { - .type = HDA_FIXUP_FUNC, - .v.func = alc233_alc662_fixup_lenovo_dual_codecs, - }, -+ [ALC233_FIXUP_ACER_HEADSET_MIC] = { -+ .type = HDA_FIXUP_VERBS, -+ .v.verbs = (const struct hda_verb[]) { -+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 }, -+ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 }, -+ { } -+ }, -+ .chained = true, -+ .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE -+ }, - [ALC294_FIXUP_LENOVO_MIC_LOCATION] = { - .type = HDA_FIXUP_PINS, - .v.pins = (const struct hda_pintbl[]) { -@@ -6735,6 +6747,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), - SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), - SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), -+ SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), - SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), - SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), - SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), -diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c -index afe67c865330..3623aa9a6f2e 100644 ---- a/sound/soc/fsl/fsl_esai.c -+++ b/sound/soc/fsl/fsl_esai.c -@@ -54,6 +54,8 @@ struct fsl_esai { - u32 fifo_depth; - u32 slot_width; - u32 slots; -+ u32 tx_mask; -+ u32 rx_mask; - u32 hck_rate[2]; - u32 sck_rate[2]; - bool hck_dir[2]; -@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, - regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, - 
ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); - -- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA, -- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask)); -- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB, -- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask)); -- - regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, - ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); - -- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA, -- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask)); -- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB, -- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask)); -- - esai_priv->slot_width = slot_width; - esai_priv->slots = slots; -+ esai_priv->tx_mask = tx_mask; -+ esai_priv->rx_mask = rx_mask; - - return 0; - } -@@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, - bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; - u8 i, channels = substream->runtime->channels; - u32 pins = DIV_ROUND_UP(channels, esai_priv->slots); -+ u32 mask; - - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: -@@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, - for (i = 0; tx && i < channels; i++) - regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0); - -+ /* -+ * When set the TE/RE in the end of enablement flow, there -+ * will be channel swap issue for multi data line case. -+ * In order to workaround this issue, we switch the bit -+ * enablement sequence to below sequence -+ * 1) clear the xSMB & xSMA: which is done in probe and -+ * stop state. -+ * 2) set TE/RE -+ * 3) set xSMB -+ * 4) set xSMA: xSMA is the last one in this flow, which -+ * will trigger esai to start. -+ */ - regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), - tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, - tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins)); -+ mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask; -+ -+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx), -+ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask)); -+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx), -+ ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask)); -+ - break; - case SNDRV_PCM_TRIGGER_SUSPEND: - case SNDRV_PCM_TRIGGER_STOP: - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), - tx ? 
ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0); -+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx), -+ ESAI_xSMA_xS_MASK, 0); -+ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx), -+ ESAI_xSMB_xS_MASK, 0); - - /* Disable and reset FIFO */ - regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), -@@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev) - return ret; - } - -+ esai_priv->tx_mask = 0xFFFFFFFF; -+ esai_priv->rx_mask = 0xFFFFFFFF; -+ -+ /* Clear the TSMA, TSMB, RSMA, RSMB */ -+ regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0); -+ regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0); -+ regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0); -+ regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0); -+ - ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component, - &fsl_esai_dai, 1); - if (ret) { -diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c -index 91a2436ce952..e9623da911d5 100644 ---- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c -+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c -@@ -711,9 +711,17 @@ static int sst_soc_probe(struct snd_soc_component *component) - return sst_dsp_init_v2_dpcm(component); - } - -+static void sst_soc_remove(struct snd_soc_component *component) -+{ -+ struct sst_data *drv = dev_get_drvdata(component->dev); -+ -+ drv->soc_card = NULL; -+} -+ - static const struct snd_soc_component_driver sst_soc_platform_drv = { - .name = DRV_NAME, - .probe = sst_soc_probe, -+ .remove = sst_soc_remove, - .ops = &sst_platform_ops, - .compr_ops = &sst_platform_compr_ops, - .pcm_new = sst_pcm_new, -diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c -index a7f413cb704d..b14ab512c2ce 100644 ---- a/sound/xen/xen_snd_front_alsa.c -+++ b/sound/xen/xen_snd_front_alsa.c -@@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream, - { - int i; - -- stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL); -+ stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL); - if (!stream->buffer) - return -ENOMEM; - diff --git a/patch/kernel/sunxi-dev/patch-5.0.8-9.patch b/patch/kernel/sunxi-dev/patch-5.0.8-9.patch deleted file mode 100644 index ca293958d..000000000 --- a/patch/kernel/sunxi-dev/patch-5.0.8-9.patch +++ /dev/null @@ -1,3652 +0,0 @@ -diff --git a/Makefile b/Makefile -index f7666051de66..ef192ca04330 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - VERSION = 5 - PATCHLEVEL = 0 --SUBLEVEL = 8 -+SUBLEVEL = 9 - EXTRAVERSION = - NAME = Shy Crocodile - -diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig -index 87b23b7fb781..aefcf7a4e17a 100644 ---- a/arch/arc/configs/hsdk_defconfig -+++ b/arch/arc/configs/hsdk_defconfig -@@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y - # CONFIG_UTS_NS is not set - # CONFIG_PID_NS is not set - CONFIG_BLK_DEV_INITRD=y -+CONFIG_BLK_DEV_RAM=y - CONFIG_EMBEDDED=y - CONFIG_PERF_EVENTS=y - # CONFIG_VM_EVENT_COUNTERS is not set -diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S -index 30e090625916..a72bbda2f7aa 100644 ---- a/arch/arc/kernel/head.S -+++ b/arch/arc/kernel/head.S -@@ -106,6 +106,7 @@ ENTRY(stext) - ; r2 = pointer to uboot provided cmdline or external DTB in mem - ; These are handled later in handle_uboot_args() - st r0, [@uboot_tag] -+ st r1, [@uboot_magic] - st r2, [@uboot_arg] - - ; setup "current" tsk and optionally cache it in dedicated r25 -diff --git a/arch/arc/kernel/setup.c 
b/arch/arc/kernel/setup.c -index 7b2340996cf8..7b3a7b3b380c 100644 ---- a/arch/arc/kernel/setup.c -+++ b/arch/arc/kernel/setup.c -@@ -36,6 +36,7 @@ unsigned int intr_to_DE_cnt; - - /* Part of U-boot ABI: see head.S */ - int __initdata uboot_tag; -+int __initdata uboot_magic; - char __initdata *uboot_arg; - - const struct machine_desc *machine_desc; -@@ -497,6 +498,8 @@ static inline bool uboot_arg_invalid(unsigned long addr) - #define UBOOT_TAG_NONE 0 - #define UBOOT_TAG_CMDLINE 1 - #define UBOOT_TAG_DTB 2 -+/* We always pass 0 as magic from U-boot */ -+#define UBOOT_MAGIC_VALUE 0 - - void __init handle_uboot_args(void) - { -@@ -511,6 +514,11 @@ void __init handle_uboot_args(void) - goto ignore_uboot_args; - } - -+ if (uboot_magic != UBOOT_MAGIC_VALUE) { -+ pr_warn(IGNORE_ARGS "non zero uboot magic\n"); -+ goto ignore_uboot_args; -+ } -+ - if (uboot_tag != UBOOT_TAG_NONE && - uboot_arg_invalid((unsigned long)uboot_arg)) { - pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); -diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c -index a50dc00d79a2..d0a05a3bdb96 100644 ---- a/arch/arm/kernel/patch.c -+++ b/arch/arm/kernel/patch.c -@@ -16,7 +16,7 @@ struct patch { - unsigned int insn; - }; - --static DEFINE_SPINLOCK(patch_lock); -+static DEFINE_RAW_SPINLOCK(patch_lock); - - static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) - __acquires(&patch_lock) -@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) - return addr; - - if (flags) -- spin_lock_irqsave(&patch_lock, *flags); -+ raw_spin_lock_irqsave(&patch_lock, *flags); - else - __acquire(&patch_lock); - -@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags) - clear_fixmap(fixmap); - - if (flags) -- spin_unlock_irqrestore(&patch_lock, *flags); -+ raw_spin_unlock_irqrestore(&patch_lock, *flags); - else - __release(&patch_lock); - } -diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c -index 46eddbec8d9f..0ab95dd431b3 100644 ---- a/arch/mips/bcm47xx/workarounds.c -+++ b/arch/mips/bcm47xx/workarounds.c -@@ -24,6 +24,7 @@ void __init bcm47xx_workarounds(void) - case BCM47XX_BOARD_NETGEAR_WNR3500L: - bcm47xx_workarounds_enable_usb_power(12); - break; -+ case BCM47XX_BOARD_NETGEAR_WNDR3400V2: - case BCM47XX_BOARD_NETGEAR_WNDR3400_V3: - bcm47xx_workarounds_enable_usb_power(21); - break; -diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c -index d3f42b6bbdac..8a9cff1f129d 100644 ---- a/arch/x86/hyperv/hv_init.c -+++ b/arch/x86/hyperv/hv_init.c -@@ -102,9 +102,13 @@ static int hv_cpu_init(unsigned int cpu) - u64 msr_vp_index; - struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; - void **input_arg; -+ struct page *pg; - - input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); -- *input_arg = page_address(alloc_page(GFP_KERNEL)); -+ pg = alloc_page(GFP_KERNEL); -+ if (unlikely(!pg)) -+ return -ENOMEM; -+ *input_arg = page_address(pg); - - hv_get_vp_index(msr_vp_index); - -diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c -index 58176b56354e..294ed4392a0e 100644 ---- a/arch/x86/kernel/aperture_64.c -+++ b/arch/x86/kernel/aperture_64.c -@@ -14,6 +14,7 @@ - #define pr_fmt(fmt) "AGP: " fmt - - #include -+#include - #include - #include - #include -@@ -57,7 +58,7 @@ int fallback_aper_force __initdata; - - int fix_aperture __initdata = 1; - --#ifdef CONFIG_PROC_VMCORE -+#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE) - /* - * If the 
first kernel maps the aperture over e820 RAM, the kdump kernel will - * use the same range because it will remain configured in the northbridge. -@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1; - */ - static unsigned long aperture_pfn_start, aperture_page_count; - --static int gart_oldmem_pfn_is_ram(unsigned long pfn) -+static int gart_mem_pfn_is_ram(unsigned long pfn) - { - return likely((pfn < aperture_pfn_start) || - (pfn >= aperture_pfn_start + aperture_page_count)); - } - --static void exclude_from_vmcore(u64 aper_base, u32 aper_order) -+static void __init exclude_from_core(u64 aper_base, u32 aper_order) - { - aperture_pfn_start = aper_base >> PAGE_SHIFT; - aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; -- WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram)); -+#ifdef CONFIG_PROC_VMCORE -+ WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram)); -+#endif -+#ifdef CONFIG_PROC_KCORE -+ WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram)); -+#endif - } - #else --static void exclude_from_vmcore(u64 aper_base, u32 aper_order) -+static void exclude_from_core(u64 aper_base, u32 aper_order) - { - } - #endif -@@ -474,7 +480,7 @@ out: - * may have allocated the range over its e820 RAM - * and fixed up the northbridge - */ -- exclude_from_vmcore(last_aper_base, last_aper_order); -+ exclude_from_core(last_aper_base, last_aper_order); - - return 1; - } -@@ -520,7 +526,7 @@ out: - * overlap with the first kernel's memory. We can't access the - * range through vmcore even though it should be part of the dump. - */ -- exclude_from_vmcore(aper_alloc, aper_order); -+ exclude_from_core(aper_alloc, aper_order); - - /* Fix up the north bridges */ - for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { -diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c -index d12226f60168..1d9b8aaea06c 100644 ---- a/arch/x86/kernel/cpu/cyrix.c -+++ b/arch/x86/kernel/cpu/cyrix.c -@@ -124,7 +124,7 @@ static void set_cx86_reorder(void) - setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ - - /* Load/Store Serialize to mem access disable (=reorder it) */ -- setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); -+ setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); - /* set load/store serialize from 1GB to 4GB */ - ccr3 |= 0xe0; - setCx86(CX86_CCR3, ccr3); -@@ -135,11 +135,11 @@ static void set_cx86_memwb(void) - pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); - - /* CCR2 bit 2: unlock NW bit */ -- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); -+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); - /* set 'Not Write-through' */ - write_cr0(read_cr0() | X86_CR0_NW); - /* CCR2 bit 2: lock NW bit and set WT1 */ -- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); -+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); - } - - /* -@@ -153,14 +153,14 @@ static void geode_configure(void) - local_irq_save(flags); - - /* Suspend on halt power saving and enable #SUSP pin */ -- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); -+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); - - ccr3 = getCx86(CX86_CCR3); - setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ - - - /* FPU fast, DTE cache, Mem bypass */ -- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); -+ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); - setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ - - set_cx86_memwb(); -@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) - /* GXm supports extended cpuid levels 'ala' AMD */ - if (c->cpuid_level == 2) { - 
/* Enable cxMMX extensions (GX1 Datasheet 54) */ -- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); -+ setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); - - /* - * GXm : 0x30 ... 0x5f GXm datasheet 51 -@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) - if (dir1 > 7) { - dir0_msn++; /* M II */ - /* Enable MMX extensions (App note 108) */ -- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); -+ setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); - } else { - /* A 6x86MX - it has the bug. */ - set_cpu_bug(c, X86_BUG_COMA); -diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c -index dfd3aca82c61..fb32925a2e62 100644 ---- a/arch/x86/kernel/hpet.c -+++ b/arch/x86/kernel/hpet.c -@@ -905,6 +905,8 @@ int __init hpet_enable(void) - return 0; - - hpet_set_mapping(); -+ if (!hpet_virt_address) -+ return 0; - - /* - * Read the period and check for a sane value: -diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c -index 34a5c1715148..2882fe1d2a78 100644 ---- a/arch/x86/kernel/hw_breakpoint.c -+++ b/arch/x86/kernel/hw_breakpoint.c -@@ -357,6 +357,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, - #endif - default: - WARN_ON_ONCE(1); -+ return -EINVAL; - } - - /* -diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c -index 3482460d984d..1bfe5c6e6cfe 100644 ---- a/arch/x86/kernel/mpparse.c -+++ b/arch/x86/kernel/mpparse.c -@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) - mpf_base = base; - mpf_found = true; - -- pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", -- base, base + sizeof(*mpf) - 1, mpf); -+ pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n", -+ base, base + sizeof(*mpf) - 1); - - memblock_reserve(base, sizeof(*mpf)); - if (mpf->physptr) -diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c -index 2620baa1f699..507212d75ee2 100644 ---- a/block/blk-iolatency.c -+++ b/block/blk-iolatency.c -@@ -75,6 +75,7 @@ - #include - #include "blk-rq-qos.h" - #include "blk-stat.h" -+#include "blk.h" - - #define DEFAULT_SCALE_COOKIE 1000000U - -diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c -index 9d66a47d32fb..49e16f009095 100644 ---- a/drivers/acpi/ec.c -+++ b/drivers/acpi/ec.c -@@ -194,6 +194,7 @@ static struct workqueue_struct *ec_query_wq; - static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ - static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ - static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ -+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ - - /* -------------------------------------------------------------------------- - * Logging/Debugging -@@ -499,6 +500,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec) - ec_log_drv("event blocked"); - } - -+/* -+ * Process _Q events that might have accumulated in the EC. -+ * Run with locked ec mutex. 
-+ */ -+static void acpi_ec_clear(struct acpi_ec *ec) -+{ -+ int i, status; -+ u8 value = 0; -+ -+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { -+ status = acpi_ec_query(ec, &value); -+ if (status || !value) -+ break; -+ } -+ if (unlikely(i == ACPI_EC_CLEAR_MAX)) -+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); -+ else -+ pr_info("%d stale EC events cleared\n", i); -+} -+ - static void acpi_ec_enable_event(struct acpi_ec *ec) - { - unsigned long flags; -@@ -507,6 +528,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) - if (acpi_ec_started(ec)) - __acpi_ec_enable_event(ec); - spin_unlock_irqrestore(&ec->lock, flags); -+ -+ /* Drain additional events if hardware requires that */ -+ if (EC_FLAGS_CLEAR_ON_RESUME) -+ acpi_ec_clear(ec); - } - - #ifdef CONFIG_PM_SLEEP -@@ -1820,6 +1845,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id) - } - #endif - -+/* -+ * On some hardware it is necessary to clear events accumulated by the EC during -+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too -+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) -+ * -+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161 -+ * -+ * Ideally, the EC should also be instructed NOT to accumulate events during -+ * sleep (which Windows seems to do somehow), but the interface to control this -+ * behaviour is not known at this time. -+ * -+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, -+ * however it is very likely that other Samsung models are affected. -+ * -+ * On systems which don't accumulate _Q events during sleep, this extra check -+ * should be harmless. -+ */ -+static int ec_clear_on_resume(const struct dmi_system_id *id) -+{ -+ pr_debug("Detected system needing EC poll on resume.\n"); -+ EC_FLAGS_CLEAR_ON_RESUME = 1; -+ ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; -+ return 0; -+} -+ - /* - * Some ECDTs contain wrong register addresses. - * MSI MS-171F -@@ -1869,6 +1919,9 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = { - ec_honor_ecdt_gpe, "ASUS X580VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, -+ { -+ ec_clear_on_resume, "Samsung hardware", { -+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, - {}, - }; - -diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c -index 78db97687f26..c4b06cc075f9 100644 ---- a/drivers/acpi/utils.c -+++ b/drivers/acpi/utils.c -@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) - match.hrv = hrv; - - dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); -+ put_device(dev); - return !!dev; - } - EXPORT_SYMBOL(acpi_dev_present); -diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c -index 9ad93ea42fdc..3cde351fb5c9 100644 ---- a/drivers/auxdisplay/hd44780.c -+++ b/drivers/auxdisplay/hd44780.c -@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev) - struct charlcd *lcd = platform_get_drvdata(pdev); - - charlcd_unregister(lcd); -+ -+ kfree(lcd); - return 0; - } - -diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c -index 500de1dee967..a00ca6b8117b 100644 ---- a/drivers/base/power/domain.c -+++ b/drivers/base/power/domain.c -@@ -1467,12 +1467,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, - if (IS_ERR(gpd_data)) - return PTR_ERR(gpd_data); - -- genpd_lock(genpd); -- - ret = genpd->attach_dev ? 
genpd->attach_dev(genpd, dev) : 0; - if (ret) - goto out; - -+ genpd_lock(genpd); -+ - dev_pm_domain_set(dev, &genpd->domain); - - genpd->device_count++; -@@ -1480,9 +1480,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, - - list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - -- out: - genpd_unlock(genpd); -- -+ out: - if (ret) - genpd_free_dev_data(dev, gpd_data); - else -@@ -1531,15 +1530,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, - genpd->device_count--; - genpd->max_off_time_changed = true; - -- if (genpd->detach_dev) -- genpd->detach_dev(genpd, dev); -- - dev_pm_domain_set(dev, NULL); - - list_del_init(&pdd->list_node); - - genpd_unlock(genpd); - -+ if (genpd->detach_dev) -+ genpd->detach_dev(genpd, dev); -+ - genpd_free_dev_data(dev, gpd_data); - - return 0; -diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c -index 96670eefaeb2..6d415b20fb70 100644 ---- a/drivers/block/paride/pcd.c -+++ b/drivers/block/paride/pcd.c -@@ -314,6 +314,7 @@ static void pcd_init_units(void) - disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, - 1, BLK_MQ_F_SHOULD_MERGE); - if (IS_ERR(disk->queue)) { -+ put_disk(disk); - disk->queue = NULL; - continue; - } -@@ -749,8 +750,14 @@ static int pcd_detect(void) - return 0; - - printk("%s: No CD-ROM drive found\n", name); -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) -+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -+ if (!cd->disk) -+ continue; -+ blk_cleanup_queue(cd->disk->queue); -+ cd->disk->queue = NULL; -+ blk_mq_free_tag_set(&cd->tag_set); - put_disk(cd->disk); -+ } - pi_unregister_driver(par_drv); - return -1; - } -@@ -1006,8 +1013,14 @@ static int __init pcd_init(void) - pcd_probe_capabilities(); - - if (register_blkdev(major, name)) { -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) -+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -+ if (!cd->disk) -+ continue; -+ -+ blk_cleanup_queue(cd->disk->queue); -+ blk_mq_free_tag_set(&cd->tag_set); - put_disk(cd->disk); -+ } - return -EBUSY; - } - -@@ -1028,6 +1041,9 @@ static void __exit pcd_exit(void) - int unit; - - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -+ if (!cd->disk) -+ continue; -+ - if (cd->present) { - del_gendisk(cd->disk); - pi_release(cd->pi); -diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c -index e92e7a8eeeb2..35e6e271b219 100644 ---- a/drivers/block/paride/pf.c -+++ b/drivers/block/paride/pf.c -@@ -761,8 +761,14 @@ static int pf_detect(void) - return 0; - - printk("%s: No ATAPI disk detected\n", name); -- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) -+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { -+ if (!pf->disk) -+ continue; -+ blk_cleanup_queue(pf->disk->queue); -+ pf->disk->queue = NULL; -+ blk_mq_free_tag_set(&pf->tag_set); - put_disk(pf->disk); -+ } - pi_unregister_driver(par_drv); - return -1; - } -@@ -1025,8 +1031,13 @@ static int __init pf_init(void) - pf_busy = 0; - - if (register_blkdev(major, name)) { -- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) -+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { -+ if (!pf->disk) -+ continue; -+ blk_cleanup_queue(pf->disk->queue); -+ blk_mq_free_tag_set(&pf->tag_set); - put_disk(pf->disk); -+ } - return -EBUSY; - } - -@@ -1047,13 +1058,18 @@ static void __exit pf_exit(void) - int unit; - unregister_blkdev(major, name); - for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { -- if (!pf->present) -+ 
if (!pf->disk) - continue; -- del_gendisk(pf->disk); -+ -+ if (pf->present) -+ del_gendisk(pf->disk); -+ - blk_cleanup_queue(pf->disk->queue); - blk_mq_free_tag_set(&pf->tag_set); - put_disk(pf->disk); -- pi_release(pf->pi); -+ -+ if (pf->present) -+ pi_release(pf->pi); - } - } - -diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c -index f3442c2bdbdc..3c70004240d6 100644 ---- a/drivers/crypto/axis/artpec6_crypto.c -+++ b/drivers/crypto/axis/artpec6_crypto.c -@@ -284,6 +284,7 @@ enum artpec6_crypto_hash_flags { - - struct artpec6_crypto_req_common { - struct list_head list; -+ struct list_head complete_in_progress; - struct artpec6_crypto_dma_descriptors *dma; - struct crypto_async_request *req; - void (*complete)(struct crypto_async_request *req); -@@ -2045,7 +2046,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) - return artpec6_crypto_dma_map_descs(common); - } - --static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) -+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac, -+ struct list_head *completions) - { - struct artpec6_crypto_req_common *req; - -@@ -2056,7 +2058,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) - list_move_tail(&req->list, &ac->pending); - artpec6_crypto_start_dma(req); - -- req->req->complete(req->req, -EINPROGRESS); -+ list_add_tail(&req->complete_in_progress, completions); - } - - /* -@@ -2086,6 +2088,11 @@ static void artpec6_crypto_task(unsigned long data) - struct artpec6_crypto *ac = (struct artpec6_crypto *)data; - struct artpec6_crypto_req_common *req; - struct artpec6_crypto_req_common *n; -+ struct list_head complete_done; -+ struct list_head complete_in_progress; -+ -+ INIT_LIST_HEAD(&complete_done); -+ INIT_LIST_HEAD(&complete_in_progress); - - if (list_empty(&ac->pending)) { - pr_debug("Spurious IRQ\n"); -@@ -2119,19 +2126,30 @@ static void artpec6_crypto_task(unsigned long data) - - pr_debug("Completing request %p\n", req); - -- list_del(&req->list); -+ list_move_tail(&req->list, &complete_done); - - artpec6_crypto_dma_unmap_all(req); - artpec6_crypto_copy_bounce_buffers(req); - - ac->pending_count--; - artpec6_crypto_common_destroy(req); -- req->complete(req->req); - } - -- artpec6_crypto_process_queue(ac); -+ artpec6_crypto_process_queue(ac, &complete_in_progress); - - spin_unlock_bh(&ac->queue_lock); -+ -+ /* Perform the completion callbacks without holding the queue lock -+ * to allow new request submissions from the callbacks. 
-+ */ -+ list_for_each_entry_safe(req, n, &complete_done, list) { -+ req->complete(req->req); -+ } -+ -+ list_for_each_entry_safe(req, n, &complete_in_progress, -+ complete_in_progress) { -+ req->req->complete(req->req, -EINPROGRESS); -+ } - } - - static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -index 3a9b48b227ac..a7208ca0bfe3 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -@@ -546,7 +546,7 @@ static int psp_load_fw(struct amdgpu_device *adev) - struct psp_context *psp = &adev->psp; - - if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) { -- psp_ring_destroy(psp, PSP_RING_TYPE__KM); -+ psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ - goto skip_memalloc; - } - -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c -index 47243165a082..ae90a99909ef 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c -@@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, - struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, - struct queue_properties *q) - { -- uint64_t addr; -- struct cik_mqd *m; -- int retval; -- -- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), -- mqd_mem_obj); -- -- if (retval != 0) -- return -ENOMEM; -- -- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; -- addr = (*mqd_mem_obj)->gpu_addr; -- -- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); -- -- m->header = 0xC0310800; -- m->compute_pipelinestat_enable = 1; -- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; -- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; -- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; -- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; -- -- m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE | -- PRELOAD_REQ; -- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | -- QUANTUM_DURATION(10); -- -- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; -- m->cp_mqd_base_addr_lo = lower_32_bits(addr); -- m->cp_mqd_base_addr_hi = upper_32_bits(addr); -- -- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE; -- -- /* -- * Pipe Priority -- * Identifies the pipe relative priority when this queue is connected -- * to the pipeline. The pipe priority is against the GFX pipe and HP3D. -- * In KFD we are using a fixed pipe priority set to CS_MEDIUM. 
-- * 0 = CS_LOW (typically below GFX) -- * 1 = CS_MEDIUM (typically between HP3D and GFX -- * 2 = CS_HIGH (typically above HP3D) -- */ -- m->cp_hqd_pipe_priority = 1; -- m->cp_hqd_queue_priority = 15; -- -- *mqd = m; -- if (gart_addr) -- *gart_addr = addr; -- retval = mm->update_mqd(mm, m, q); -- -- return retval; -+ return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); - } - - static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, -diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c -index 0573eab0e190..f35e4ab55b27 100644 ---- a/drivers/gpu/drm/exynos/exynos_mixer.c -+++ b/drivers/gpu/drm/exynos/exynos_mixer.c -@@ -20,6 +20,7 @@ - #include "regs-vp.h" - - #include -+#include - #include - #include - #include -@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha) - mixer_reg_write(ctx, MXR_VIDEO_CFG, val); - } - --static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) -+static bool mixer_is_synced(struct mixer_context *ctx) - { -- /* block update on vsync */ -- mixer_reg_writemask(ctx, MXR_STATUS, enable ? -- MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); -+ u32 base, shadow; - -+ if (ctx->mxr_ver == MXR_VER_16_0_33_0 || -+ ctx->mxr_ver == MXR_VER_128_0_0_184) -+ return !(mixer_reg_read(ctx, MXR_CFG) & -+ MXR_CFG_LAYER_UPDATE_COUNT_MASK); -+ -+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && -+ vp_reg_read(ctx, VP_SHADOW_UPDATE)) -+ return false; -+ -+ base = mixer_reg_read(ctx, MXR_CFG); -+ shadow = mixer_reg_read(ctx, MXR_CFG_S); -+ if (base != shadow) -+ return false; -+ -+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); -+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); -+ if (base != shadow) -+ return false; -+ -+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); -+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); -+ if (base != shadow) -+ return false; -+ -+ return true; -+} -+ -+static int mixer_wait_for_sync(struct mixer_context *ctx) -+{ -+ ktime_t timeout = ktime_add_us(ktime_get(), 100000); -+ -+ while (!mixer_is_synced(ctx)) { -+ usleep_range(1000, 2000); -+ if (ktime_compare(ktime_get(), timeout) > 0) -+ return -ETIMEDOUT; -+ } -+ return 0; -+} -+ -+static void mixer_disable_sync(struct mixer_context *ctx) -+{ -+ mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE); -+} -+ -+static void mixer_enable_sync(struct mixer_context *ctx) -+{ -+ if (ctx->mxr_ver == MXR_VER_16_0_33_0 || -+ ctx->mxr_ver == MXR_VER_128_0_0_184) -+ mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); -+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE); - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) -- vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? -- VP_SHADOW_UPDATE_ENABLE : 0); -+ vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE); - } - - static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) -@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx, - - spin_lock_irqsave(&ctx->reg_slock, flags); - -- vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); - /* interlace or progressive scan mode */ - val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
~0 : 0); - vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); -@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx, - vp_regs_dump(ctx); - } - --static void mixer_layer_update(struct mixer_context *ctx) --{ -- mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); --} -- - static void mixer_graph_buffer(struct mixer_context *ctx, - struct exynos_drm_plane *plane) - { -@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, - mixer_cfg_layer(ctx, win, priority, true); - mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha); - -- /* layer update mandatory for mixer 16.0.33.0 */ -- if (ctx->mxr_ver == MXR_VER_16_0_33_0 || -- ctx->mxr_ver == MXR_VER_128_0_0_184) -- mixer_layer_update(ctx); -- - spin_unlock_irqrestore(&ctx->reg_slock, flags); - - mixer_regs_dump(ctx); -@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx) - static irqreturn_t mixer_irq_handler(int irq, void *arg) - { - struct mixer_context *ctx = arg; -- u32 val, base, shadow; -+ u32 val; - - spin_lock(&ctx->reg_slock); - -@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) - val &= ~MXR_INT_STATUS_VSYNC; - - /* interlace scan need to check shadow register */ -- if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { -- if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && -- vp_reg_read(ctx, VP_SHADOW_UPDATE)) -- goto out; -- -- base = mixer_reg_read(ctx, MXR_CFG); -- shadow = mixer_reg_read(ctx, MXR_CFG_S); -- if (base != shadow) -- goto out; -- -- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); -- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); -- if (base != shadow) -- goto out; -- -- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); -- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); -- if (base != shadow) -- goto out; -- } -+ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags) -+ && !mixer_is_synced(ctx)) -+ goto out; - - drm_crtc_handle_vblank(&ctx->crtc->base); - } -@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) - - static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) - { -- struct mixer_context *mixer_ctx = crtc->ctx; -+ struct mixer_context *ctx = crtc->ctx; - -- if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) -+ if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) - return; - -- mixer_vsync_set_update(mixer_ctx, false); -+ if (mixer_wait_for_sync(ctx)) -+ dev_err(ctx->dev, "timeout waiting for VSYNC\n"); -+ mixer_disable_sync(ctx); - } - - static void mixer_update_plane(struct exynos_drm_crtc *crtc, -@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) - return; - -- mixer_vsync_set_update(mixer_ctx, true); -+ mixer_enable_sync(mixer_ctx); - exynos_crtc_handle_event(crtc); - } - -@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) - - exynos_drm_pipe_clk_enable(crtc, true); - -- mixer_vsync_set_update(ctx, false); -+ mixer_disable_sync(ctx); - - mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); - -@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) - - mixer_commit(ctx); - -- mixer_vsync_set_update(ctx, true); -+ mixer_enable_sync(ctx); - - set_bit(MXR_BIT_POWERED, &ctx->flags); - } -diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h -index 8a0f85f5fc1a..6a765682fbfa 100644 ---- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h -+++ 
b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h -@@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp, - - int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **); - int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **); -+int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **); - int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **); - int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **); - int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **); -diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c -index 88a52f6b39fe..7dfbbbc1beea 100644 ---- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c -+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c -@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, - } - - ret = pm_runtime_get_sync(drm->dev); -- if (IS_ERR_VALUE(ret) && ret != -EACCES) -+ if (ret < 0 && ret != -EACCES) - return ret; - ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); - pm_runtime_put_autosuspend(drm->dev); -diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c -index d9edb5785813..d75fa7678483 100644 ---- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c -+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c -@@ -1613,7 +1613,7 @@ nvd7_chipset = { - .pci = gf106_pci_new, - .therm = gf119_therm_new, - .timer = nv41_timer_new, -- .volt = gf100_volt_new, -+ .volt = gf117_volt_new, - .ce[0] = gf100_ce_new, - .disp = gf119_disp_new, - .dma = gf119_dma_new, -diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild -index bcd179ba11d0..146adcdd316a 100644 ---- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild -+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild -@@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o - nvkm-y += nvkm/subdev/volt/gpio.o - nvkm-y += nvkm/subdev/volt/nv40.o - nvkm-y += nvkm/subdev/volt/gf100.o -+nvkm-y += nvkm/subdev/volt/gf117.o - nvkm-y += nvkm/subdev/volt/gk104.o - nvkm-y += nvkm/subdev/volt/gk20a.o - nvkm-y += nvkm/subdev/volt/gm20b.o -diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c -new file mode 100644 -index 000000000000..547a58f0aeac ---- /dev/null -+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c -@@ -0,0 +1,60 @@ -+/* -+ * Copyright 2019 Ilia Mirkin -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: Ilia Mirkin -+ */ -+#include "priv.h" -+ -+#include -+ -+static int -+gf117_volt_speedo_read(struct nvkm_volt *volt) -+{ -+ struct nvkm_device *device = volt->subdev.device; -+ struct nvkm_fuse *fuse = device->fuse; -+ -+ if (!fuse) -+ return -EINVAL; -+ -+ return nvkm_fuse_read(fuse, 0x3a8); -+} -+ -+static const struct nvkm_volt_func -+gf117_volt = { -+ .oneinit = gf100_volt_oneinit, -+ .vid_get = nvkm_voltgpio_get, -+ .vid_set = nvkm_voltgpio_set, -+ .speedo_read = gf117_volt_speedo_read, -+}; -+ -+int -+gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) -+{ -+ struct nvkm_volt *volt; -+ int ret; -+ -+ ret = nvkm_volt_new_(&gf117_volt, device, index, &volt); -+ *pvolt = volt; -+ if (ret) -+ return ret; -+ -+ return nvkm_voltgpio_init(volt); -+} -diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c -index ca4ae45dd307..8e5724b63f1f 100644 ---- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c -+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c -@@ -70,18 +70,12 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel) - static int innolux_panel_disable(struct drm_panel *panel) - { - struct innolux_panel *innolux = to_innolux_panel(panel); -- int err; - - if (!innolux->enabled) - return 0; - - backlight_disable(innolux->backlight); - -- err = mipi_dsi_dcs_set_display_off(innolux->link); -- if (err < 0) -- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n", -- err); -- - innolux->enabled = false; - - return 0; -@@ -95,6 +89,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel) - if (!innolux->prepared) - return 0; - -+ err = mipi_dsi_dcs_set_display_off(innolux->link); -+ if (err < 0) -+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n", -+ err); -+ - err = mipi_dsi_dcs_enter_sleep_mode(innolux->link); - if (err < 0) { - DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n", -diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c -index d5a23295dd80..bb7b58407039 100644 ---- a/drivers/gpu/drm/udl/udl_gem.c -+++ b/drivers/gpu/drm/udl/udl_gem.c -@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, - *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); - - out: -- drm_gem_object_put(&gobj->base); -+ drm_gem_object_put_unlocked(&gobj->base); - unlock: - mutex_unlock(&udl->gem_lock); - return ret; -diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c -index 45b2460f3166..e8819d750938 100644 ---- a/drivers/hwtracing/coresight/coresight-cpu-debug.c -+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c -@@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = { - .id = 0x000bbd08, - .mask = 0x000fffff, - }, -+ { /* Debug for Cortex-A73 */ -+ .id = 0x000bbd09, -+ .mask = 0x000fffff, -+ }, - { 0, 0 }, - }; - -diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c -index 5344e8993b28..5866f358ea04 100644 ---- a/drivers/infiniband/hw/hfi1/qp.c -+++ b/drivers/infiniband/hw/hfi1/qp.c -@@ -833,7 +833,7 @@ void notify_error_qp(struct rvt_qp *qp) - write_seqlock(lock); - if (!list_empty(&priv->s_iowait.list) && - 
!(qp->s_flags & RVT_S_BUSY)) { -- qp->s_flags &= ~RVT_S_ANY_WAIT_IO; -+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; - list_del_init(&priv->s_iowait.list); - priv->s_iowait.lock = NULL; - rvt_put_qp(qp); -diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h -index 509e467843f6..f4cac63194d9 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_device.h -+++ b/drivers/infiniband/hw/hns/hns_roce_device.h -@@ -216,6 +216,26 @@ enum { - HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4 - }; - -+enum hns_roce_reset_stage { -+ HNS_ROCE_STATE_NON_RST, -+ HNS_ROCE_STATE_RST_BEF_DOWN, -+ HNS_ROCE_STATE_RST_DOWN, -+ HNS_ROCE_STATE_RST_UNINIT, -+ HNS_ROCE_STATE_RST_INIT, -+ HNS_ROCE_STATE_RST_INITED, -+}; -+ -+enum hns_roce_instance_state { -+ HNS_ROCE_STATE_NON_INIT, -+ HNS_ROCE_STATE_INIT, -+ HNS_ROCE_STATE_INITED, -+ HNS_ROCE_STATE_UNINIT, -+}; -+ -+enum { -+ HNS_ROCE_RST_DIRECT_RETURN = 0, -+}; -+ - #define HNS_ROCE_CMD_SUCCESS 1 - - #define HNS_ROCE_PORT_DOWN 0 -@@ -898,6 +918,7 @@ struct hns_roce_dev { - spinlock_t bt_cmd_lock; - bool active; - bool is_reset; -+ unsigned long reset_cnt; - struct hns_roce_ib_iboe iboe; - - struct list_head pgdir_list; -diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c -index 543fa1504cd3..7ac06576d791 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c -+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c -@@ -5800,6 +5800,7 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl); - static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, - struct hnae3_handle *handle) - { -+ struct hns_roce_v2_priv *priv = hr_dev->priv; - const struct pci_device_id *id; - int i; - -@@ -5830,10 +5831,13 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, - hr_dev->cmd_mod = 1; - hr_dev->loop_idc = 0; - -+ hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle); -+ priv->handle = handle; -+ - return 0; - } - --static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) -+static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) - { - struct hns_roce_dev *hr_dev; - int ret; -@@ -5850,7 +5854,6 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) - - hr_dev->pci_dev = handle->pdev; - hr_dev->dev = &handle->pdev->dev; -- handle->priv = hr_dev; - - ret = hns_roce_hw_v2_get_cfg(hr_dev, handle); - if (ret) { -@@ -5864,6 +5867,8 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) - goto error_failed_get_cfg; - } - -+ handle->priv = hr_dev; -+ - return 0; - - error_failed_get_cfg: -@@ -5875,7 +5880,7 @@ error_failed_kzalloc: - return ret; - } - --static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, -+static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, - bool reset) - { - struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; -@@ -5883,24 +5888,78 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, - if (!hr_dev) - return; - -+ handle->priv = NULL; - hns_roce_exit(hr_dev); - kfree(hr_dev->priv); - ib_dealloc_device(&hr_dev->ib_dev); - } - -+static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) -+{ -+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops; -+ struct device *dev = &handle->pdev->dev; -+ int ret; -+ -+ handle->rinfo.instance_state = HNS_ROCE_STATE_INIT; -+ -+ if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) { -+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; -+ goto reset_chk_err; -+ } -+ -+ ret = 
__hns_roce_hw_v2_init_instance(handle); -+ if (ret) { -+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; -+ dev_err(dev, "RoCE instance init failed! ret = %d\n", ret); -+ if (ops->ae_dev_resetting(handle) || -+ ops->get_hw_reset_stat(handle)) -+ goto reset_chk_err; -+ else -+ return ret; -+ } -+ -+ handle->rinfo.instance_state = HNS_ROCE_STATE_INITED; -+ -+ -+ return 0; -+ -+reset_chk_err: -+ dev_err(dev, "Device is busy in resetting state.\n" -+ "please retry later.\n"); -+ -+ return -EBUSY; -+} -+ -+static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, -+ bool reset) -+{ -+ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) -+ return; -+ -+ handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT; -+ -+ __hns_roce_hw_v2_uninit_instance(handle, reset); -+ -+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; -+} - static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) - { -- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; -+ struct hns_roce_dev *hr_dev; - struct ib_event event; - -- if (!hr_dev) { -- dev_err(&handle->pdev->dev, -- "Input parameter handle->priv is NULL!\n"); -- return -EINVAL; -+ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) { -+ set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); -+ return 0; - } - -+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN; -+ clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); -+ -+ hr_dev = (struct hns_roce_dev *)handle->priv; -+ if (!hr_dev) -+ return 0; -+ - hr_dev->active = false; -- hr_dev->is_reset = true; - - event.event = IB_EVENT_DEVICE_FATAL; - event.device = &hr_dev->ib_dev; -@@ -5912,17 +5971,29 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) - - static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) - { -+ struct device *dev = &handle->pdev->dev; - int ret; - -- ret = hns_roce_hw_v2_init_instance(handle); -+ if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN, -+ &handle->rinfo.state)) { -+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; -+ return 0; -+ } -+ -+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT; -+ -+ dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n"); -+ ret = __hns_roce_hw_v2_init_instance(handle); - if (ret) { - /* when reset notify type is HNAE3_INIT_CLIENT In reset notify - * callback function, RoCE Engine reinitialize. If RoCE reinit - * failed, we should inform NIC driver. 
- */ - handle->priv = NULL; -- dev_err(&handle->pdev->dev, -- "In reset process RoCE reinit failed %d.\n", ret); -+ dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret); -+ } else { -+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; -+ dev_info(dev, "Reset done, RoCE client reinit finished.\n"); - } - - return ret; -@@ -5930,8 +6001,14 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) - - static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle) - { -+ if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) -+ return 0; -+ -+ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; -+ dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); - msleep(100); -- hns_roce_hw_v2_uninit_instance(handle, false); -+ __hns_roce_hw_v2_uninit_instance(handle, false); -+ - return 0; - } - -diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h -index b72d0443c835..5398aa718cfc 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h -+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h -@@ -1546,6 +1546,7 @@ struct hns_roce_link_table_entry { - #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20) - - struct hns_roce_v2_priv { -+ struct hnae3_handle *handle; - struct hns_roce_v2_cmq cmq; - struct hns_roce_link_table tsq; - struct hns_roce_link_table tpq; -diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c -index 59e978141ad4..e99177533930 100644 ---- a/drivers/infiniband/hw/i40iw/i40iw_utils.c -+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c -@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, - - rcu_read_lock(); - in = __in_dev_get_rcu(upper_dev); -- local_ipaddr = ntohl(in->ifa_list->ifa_address); -+ -+ if (!in->ifa_list) -+ local_ipaddr = 0; -+ else -+ local_ipaddr = ntohl(in->ifa_list->ifa_address); -+ - rcu_read_unlock(); - } else { - local_ipaddr = ntohl(ifa->ifa_address); -@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, - case NETDEV_UP: - /* Fall through */ - case NETDEV_CHANGEADDR: -+ -+ /* Just skip if no need to handle ARP cache */ -+ if (!local_ipaddr) -+ break; -+ - i40iw_manage_arp_cache(iwdev, - netdev->dev_addr, - &local_ipaddr, -diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c -index 782499abcd98..2a0b59a4b6eb 100644 ---- a/drivers/infiniband/hw/mlx4/alias_GUID.c -+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c -@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) - unsigned long flags; - - for (i = 0 ; i < dev->num_ports; i++) { -- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work); - det = &sriov->alias_guid.ports_guid[i]; -+ cancel_delayed_work_sync(&det->alias_guid_work); - spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); - while (!list_empty(&det->cb_list)) { - cb_ctx = list_entry(det->cb_list.next, -diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c -index dbd6824dfffa..53b1fbadc496 100644 ---- a/drivers/iommu/intel-iommu.c -+++ b/drivers/iommu/intel-iommu.c -@@ -1534,6 +1534,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) - u32 pmen; - unsigned long flags; - -+ if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) -+ return; -+ - raw_spin_lock_irqsave(&iommu->register_lock, flags); - pmen = readl(iommu->reg + DMAR_PMEN_REG); - pmen &= ~DMA_PMEN_EPM; -@@ -5328,7 +5331,7 @@ int 
intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd - - ctx_lo = context[0].lo; - -- sdev->did = domain->iommu_did[iommu->seq_id]; -+ sdev->did = FLPT_DEFAULT_DID; - sdev->sid = PCI_DEVID(info->bus, info->devfn); - - if (!(ctx_lo & CONTEXT_PASIDE)) { -diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c -index 567b29c47608..98b6e1d4b1a6 100644 ---- a/drivers/irqchip/irq-mbigen.c -+++ b/drivers/irqchip/irq-mbigen.c -@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg) - void __iomem *base = d->chip_data; - u32 val; - -+ if (!msg->address_lo && !msg->address_hi) -+ return; -+ - base += get_mbigen_vec_reg(d->hwirq); - val = readl_relaxed(base); - -diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c -index a93296b9b45d..7bd1d4cb2e19 100644 ---- a/drivers/irqchip/irq-stm32-exti.c -+++ b/drivers/irqchip/irq-stm32-exti.c -@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, - const struct stm32_exti_bank *stm32_bank; - struct stm32_exti_chip_data *chip_data; - void __iomem *base = h_data->base; -- u32 irqs_mask; - - stm32_bank = h_data->drv_data->exti_banks[bank_idx]; - chip_data = &h_data->chips_data[bank_idx]; -@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, - - raw_spin_lock_init(&chip_data->rlock); - -- /* Determine number of irqs supported */ -- writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst); -- irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst); -- - /* - * This IP has no reset, so after hot reboot we should - * clear registers to avoid residue - */ - writel_relaxed(0, base + stm32_bank->imr_ofst); - writel_relaxed(0, base + stm32_bank->emr_ofst); -- writel_relaxed(0, base + stm32_bank->rtsr_ofst); -- writel_relaxed(0, base + stm32_bank->ftsr_ofst); -- writel_relaxed(~0UL, base + stm32_bank->rpr_ofst); -- if (stm32_bank->fpr_ofst != UNDEF_REG) -- writel_relaxed(~0UL, base + stm32_bank->fpr_ofst); - - pr_info("%pOF: bank%d\n", h_data->node, bank_idx); - -diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c -index 2837dc77478e..f0f9eb30bd2b 100644 ---- a/drivers/misc/lkdtm/core.c -+++ b/drivers/misc/lkdtm/core.c -@@ -152,7 +152,9 @@ static const struct crashtype crashtypes[] = { - CRASHTYPE(EXEC_VMALLOC), - CRASHTYPE(EXEC_RODATA), - CRASHTYPE(EXEC_USERSPACE), -+ CRASHTYPE(EXEC_NULL), - CRASHTYPE(ACCESS_USERSPACE), -+ CRASHTYPE(ACCESS_NULL), - CRASHTYPE(WRITE_RO), - CRASHTYPE(WRITE_RO_AFTER_INIT), - CRASHTYPE(WRITE_KERN), -diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h -index 3c6fd327e166..b69ee004a3f7 100644 ---- a/drivers/misc/lkdtm/lkdtm.h -+++ b/drivers/misc/lkdtm/lkdtm.h -@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void); - void lkdtm_EXEC_VMALLOC(void); - void lkdtm_EXEC_RODATA(void); - void lkdtm_EXEC_USERSPACE(void); -+void lkdtm_EXEC_NULL(void); - void lkdtm_ACCESS_USERSPACE(void); -+void lkdtm_ACCESS_NULL(void); - - /* lkdtm_refcount.c */ - void lkdtm_REFCOUNT_INC_OVERFLOW(void); -diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c -index 53b85c9d16b8..62f76d506f04 100644 ---- a/drivers/misc/lkdtm/perms.c -+++ b/drivers/misc/lkdtm/perms.c -@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write) - { - void (*func)(void) = dst; - -- pr_info("attempting ok execution at %p\n", do_nothing); -+ pr_info("attempting ok execution at %px\n", do_nothing); - do_nothing(); - - if (write == CODE_WRITE) { 
-@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write) - flush_icache_range((unsigned long)dst, - (unsigned long)dst + EXEC_SIZE); - } -- pr_info("attempting bad execution at %p\n", func); -+ pr_info("attempting bad execution at %px\n", func); - func(); - } - -@@ -66,14 +66,14 @@ static void execute_user_location(void *dst) - /* Intentionally crossing kernel/user memory boundary. */ - void (*func)(void) = dst; - -- pr_info("attempting ok execution at %p\n", do_nothing); -+ pr_info("attempting ok execution at %px\n", do_nothing); - do_nothing(); - - copied = access_process_vm(current, (unsigned long)dst, do_nothing, - EXEC_SIZE, FOLL_WRITE); - if (copied < EXEC_SIZE) - return; -- pr_info("attempting bad execution at %p\n", func); -+ pr_info("attempting bad execution at %px\n", func); - func(); - } - -@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void) - /* Explicitly cast away "const" for the test. */ - unsigned long *ptr = (unsigned long *)&rodata; - -- pr_info("attempting bad rodata write at %p\n", ptr); -+ pr_info("attempting bad rodata write at %px\n", ptr); - *ptr ^= 0xabcd1234; - } - -@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void) - return; - } - -- pr_info("attempting bad ro_after_init write at %p\n", ptr); -+ pr_info("attempting bad ro_after_init write at %px\n", ptr); - *ptr ^= 0xabcd1234; - } - -@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void) - size = (unsigned long)do_overwritten - (unsigned long)do_nothing; - ptr = (unsigned char *)do_overwritten; - -- pr_info("attempting bad %zu byte write at %p\n", size, ptr); -+ pr_info("attempting bad %zu byte write at %px\n", size, ptr); - memcpy(ptr, (unsigned char *)do_nothing, size); - flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size)); - -@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void) - vm_munmap(user_addr, PAGE_SIZE); - } - -+void lkdtm_EXEC_NULL(void) -+{ -+ execute_location(NULL, CODE_AS_IS); -+} -+ - void lkdtm_ACCESS_USERSPACE(void) - { - unsigned long user_addr, tmp = 0; -@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void) - - ptr = (unsigned long *)user_addr; - -- pr_info("attempting bad read at %p\n", ptr); -+ pr_info("attempting bad read at %px\n", ptr); - tmp = *ptr; - tmp += 0xc0dec0de; - -- pr_info("attempting bad write at %p\n", ptr); -+ pr_info("attempting bad write at %px\n", ptr); - *ptr = tmp; - - vm_munmap(user_addr, PAGE_SIZE); - } - -+void lkdtm_ACCESS_NULL(void) -+{ -+ unsigned long tmp; -+ unsigned long *ptr = (unsigned long *)NULL; -+ -+ pr_info("attempting bad read at %px\n", ptr); -+ tmp = *ptr; -+ tmp += 0xc0dec0de; -+ -+ pr_info("attempting bad write at %px\n", ptr); -+ *ptr = tmp; -+} -+ - void __init lkdtm_perms_init(void) - { - /* Make sure we can write to __ro_after_init values during __init */ -diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c -index 9e68c3645e22..e6f14257a7d0 100644 ---- a/drivers/mmc/host/davinci_mmc.c -+++ b/drivers/mmc/host/davinci_mmc.c -@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) - { - } - #endif --static void __init init_mmcsd_host(struct mmc_davinci_host *host) -+static void init_mmcsd_host(struct mmc_davinci_host *host) - { - - mmc_davinci_reset_ctrl(host, 1); -diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h -index 09c774fe8853..854a55d4332a 100644 ---- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h -+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h -@@ -463,6 +463,8 @@ struct hnae3_ae_ops 
{ - int (*set_gro_en)(struct hnae3_handle *handle, int enable); - u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id); - void (*set_timer_task)(struct hnae3_handle *handle, bool enable); -+ int (*mac_connect_phy)(struct hnae3_handle *handle); -+ void (*mac_disconnect_phy)(struct hnae3_handle *handle); - }; - - struct hnae3_dcb_ops { -diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c -index d84c50068f66..40b69eaf2cb3 100644 ---- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c -+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c -@@ -3519,6 +3519,25 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init) - return ret; - } - -+static int hns3_init_phy(struct net_device *netdev) -+{ -+ struct hnae3_handle *h = hns3_get_handle(netdev); -+ int ret = 0; -+ -+ if (h->ae_algo->ops->mac_connect_phy) -+ ret = h->ae_algo->ops->mac_connect_phy(h); -+ -+ return ret; -+} -+ -+static void hns3_uninit_phy(struct net_device *netdev) -+{ -+ struct hnae3_handle *h = hns3_get_handle(netdev); -+ -+ if (h->ae_algo->ops->mac_disconnect_phy) -+ h->ae_algo->ops->mac_disconnect_phy(h); -+} -+ - static int hns3_restore_fd_rules(struct net_device *netdev) - { - struct hnae3_handle *h = hns3_get_handle(netdev); -@@ -3627,6 +3646,10 @@ static int hns3_client_init(struct hnae3_handle *handle) - goto out_init_ring_data; - } - -+ ret = hns3_init_phy(netdev); -+ if (ret) -+ goto out_init_phy; -+ - ret = register_netdev(netdev); - if (ret) { - dev_err(priv->dev, "probe register netdev fail!\n"); -@@ -3651,6 +3674,9 @@ static int hns3_client_init(struct hnae3_handle *handle) - return ret; - - out_reg_netdev_fail: -+ hns3_uninit_phy(netdev); -+out_init_phy: -+ hns3_uninit_all_ring(priv); - out_init_ring_data: - (void)hns3_nic_uninit_vector_data(priv); - out_init_vector_data: -@@ -3685,6 +3711,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) - - hns3_force_clear_all_rx_ring(handle); - -+ hns3_uninit_phy(netdev); -+ - ret = hns3_nic_uninit_vector_data(priv); - if (ret) - netdev_err(netdev, "uninit vector error\n"); -diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c -index f7637c08bb3a..cb7571747af7 100644 ---- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c -+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c -@@ -6959,16 +6959,6 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, - *tp_mdix = ETH_TP_MDI; - } - --static int hclge_init_instance_hw(struct hclge_dev *hdev) --{ -- return hclge_mac_connect_phy(hdev); --} -- --static void hclge_uninit_instance_hw(struct hclge_dev *hdev) --{ -- hclge_mac_disconnect_phy(hdev); --} -- - static int hclge_init_client_instance(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) - { -@@ -6988,13 +6978,6 @@ static int hclge_init_client_instance(struct hnae3_client *client, - if (ret) - goto clear_nic; - -- ret = hclge_init_instance_hw(hdev); -- if (ret) { -- client->ops->uninit_instance(&vport->nic, -- 0); -- goto clear_nic; -- } -- - hnae3_set_client_init_flag(client, ae_dev, 1); - - if (hdev->roce_client && -@@ -7079,7 +7062,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, - if (client->type == HNAE3_CLIENT_ROCE) - return; - if (hdev->nic_client && client->ops->uninit_instance) { -- hclge_uninit_instance_hw(hdev); - client->ops->uninit_instance(&vport->nic, 0); - hdev->nic_client = NULL; - vport->nic.client = NULL; -@@ -8012,6 
+7994,8 @@ static const struct hnae3_ae_ops hclge_ops = { - .set_gro_en = hclge_gro_en, - .get_global_queue_id = hclge_covert_handle_qid_global, - .set_timer_task = hclge_set_timer_task, -+ .mac_connect_phy = hclge_mac_connect_phy, -+ .mac_disconnect_phy = hclge_mac_disconnect_phy, - }; - - static struct hnae3_ae_algo ae_algo = { -diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c -index dabb8437f8dc..84f28785ba28 100644 ---- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c -+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c -@@ -195,8 +195,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev) - netdev_err(netdev, "failed to configure flow control.\n"); - } - --int hclge_mac_connect_phy(struct hclge_dev *hdev) -+int hclge_mac_connect_phy(struct hnae3_handle *handle) - { -+ struct hclge_vport *vport = hclge_get_vport(handle); -+ struct hclge_dev *hdev = vport->back; - struct net_device *netdev = hdev->vport[0].nic.netdev; - struct phy_device *phydev = hdev->hw.mac.phydev; - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -@@ -229,8 +231,10 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) - return 0; - } - --void hclge_mac_disconnect_phy(struct hclge_dev *hdev) -+void hclge_mac_disconnect_phy(struct hnae3_handle *handle) - { -+ struct hclge_vport *vport = hclge_get_vport(handle); -+ struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - - if (!phydev) -diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h -index 5fbf7dddb5d9..ef095d9c566f 100644 ---- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h -+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h -@@ -5,8 +5,8 @@ - #define __HCLGE_MDIO_H - - int hclge_mac_mdio_config(struct hclge_dev *hdev); --int hclge_mac_connect_phy(struct hclge_dev *hdev); --void hclge_mac_disconnect_phy(struct hclge_dev *hdev); -+int hclge_mac_connect_phy(struct hnae3_handle *handle); -+void hclge_mac_disconnect_phy(struct hnae3_handle *handle); - void hclge_mac_start_phy(struct hclge_dev *hdev); - void hclge_mac_stop_phy(struct hclge_dev *hdev); - -diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c -index c25acace7d91..e91005d0f20c 100644 ---- a/drivers/pci/pci.c -+++ b/drivers/pci/pci.c -@@ -1233,7 +1233,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev) - pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); - } - -- - static int pci_save_pcix_state(struct pci_dev *dev) - { - int pos; -@@ -1270,6 +1269,45 @@ static void pci_restore_pcix_state(struct pci_dev *dev) - pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); - } - -+static void pci_save_ltr_state(struct pci_dev *dev) -+{ -+ int ltr; -+ struct pci_cap_saved_state *save_state; -+ u16 *cap; -+ -+ if (!pci_is_pcie(dev)) -+ return; -+ -+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); -+ if (!ltr) -+ return; -+ -+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); -+ if (!save_state) { -+ pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); -+ return; -+ } -+ -+ cap = (u16 *)&save_state->cap.data[0]; -+ pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++); -+ pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++); -+} -+ -+static void pci_restore_ltr_state(struct pci_dev *dev) -+{ -+ struct pci_cap_saved_state *save_state; -+ int ltr; -+ u16 *cap; -+ -+ save_state = 
pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); -+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); -+ if (!save_state || !ltr) -+ return; -+ -+ cap = (u16 *)&save_state->cap.data[0]; -+ pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++); -+ pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++); -+} - - /** - * pci_save_state - save the PCI configuration space of a device before suspending -@@ -1291,6 +1329,7 @@ int pci_save_state(struct pci_dev *dev) - if (i != 0) - return i; - -+ pci_save_ltr_state(dev); - pci_save_dpc_state(dev); - return pci_save_vc_state(dev); - } -@@ -1390,7 +1429,12 @@ void pci_restore_state(struct pci_dev *dev) - if (!dev->state_saved) - return; - -- /* PCI Express register must be restored first */ -+ /* -+ * Restore max latencies (in the LTR capability) before enabling -+ * LTR itself (in the PCIe capability). -+ */ -+ pci_restore_ltr_state(dev); -+ - pci_restore_pcie_state(dev); - pci_restore_pasid_state(dev); - pci_restore_pri_state(dev); -@@ -2501,6 +2545,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev) - pm_runtime_put_sync(parent); - } - -+static const struct dmi_system_id bridge_d3_blacklist[] = { -+#ifdef CONFIG_X86 -+ { -+ /* -+ * Gigabyte X299 root port is not marked as hotplug capable -+ * which allows Linux to power manage it. However, this -+ * confuses the BIOS SMI handler so don't power manage root -+ * ports on that system. -+ */ -+ .ident = "X299 DESIGNARE EX-CF", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), -+ DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), -+ }, -+ }, -+#endif -+ { } -+}; -+ - /** - * pci_bridge_d3_possible - Is it possible to put the bridge into D3 - * @bridge: Bridge to check -@@ -2546,6 +2609,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) - if (bridge->is_hotplug_bridge) - return false; - -+ if (dmi_check_system(bridge_d3_blacklist)) -+ return false; -+ - /* - * It should be safe to put PCIe ports from 2015 or newer - * to D3. -@@ -2998,6 +3064,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) - if (error) - pci_err(dev, "unable to preallocate PCI-X save buffer\n"); - -+ error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, -+ 2 * sizeof(u16)); -+ if (error) -+ pci_err(dev, "unable to allocate suspend buffer for LTR\n"); -+ - pci_allocate_vc_save_buffers(dev); - } - -diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c -index c37e74ee609d..a9cbe5be277b 100644 ---- a/drivers/platform/x86/intel_pmc_core.c -+++ b/drivers/platform/x86/intel_pmc_core.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -139,6 +140,7 @@ static const struct pmc_reg_map spt_reg_map = { - .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET, - .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT, - .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED, -+ .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET, - }; - - /* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */ -@@ -751,6 +753,37 @@ static const struct pci_device_id pmc_pci_ids[] = { - { 0, }, - }; - -+/* -+ * This quirk can be used on those platforms where -+ * the platform BIOS enforces 24Mhx Crystal to shutdown -+ * before PMC can assert SLP_S0#. 
-+ */ -+int quirk_xtal_ignore(const struct dmi_system_id *id) -+{ -+ struct pmc_dev *pmcdev = &pmc; -+ u32 value; -+ -+ value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset); -+ /* 24MHz Crystal Shutdown Qualification Disable */ -+ value |= SPT_PMC_VRIC1_XTALSDQDIS; -+ /* Low Voltage Mode Enable */ -+ value &= ~SPT_PMC_VRIC1_SLPS0LVEN; -+ pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value); -+ return 0; -+} -+ -+static const struct dmi_system_id pmc_core_dmi_table[] = { -+ { -+ .callback = quirk_xtal_ignore, -+ .ident = "HP Elite x2 1013 G3", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "HP"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"), -+ }, -+ }, -+ {} -+}; -+ - static int __init pmc_core_probe(void) - { - struct pmc_dev *pmcdev = &pmc; -@@ -792,6 +825,7 @@ static int __init pmc_core_probe(void) - return err; - } - -+ dmi_check_system(pmc_core_dmi_table); - pr_info(" initialized\n"); - return 0; - } -diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h -index 1a0104d2cbf0..9bc16d7d2917 100644 ---- a/drivers/platform/x86/intel_pmc_core.h -+++ b/drivers/platform/x86/intel_pmc_core.h -@@ -25,6 +25,7 @@ - #define SPT_PMC_MTPMC_OFFSET 0x20 - #define SPT_PMC_MFPMC_OFFSET 0x38 - #define SPT_PMC_LTR_IGNORE_OFFSET 0x30C -+#define SPT_PMC_VRIC1_OFFSET 0x31c - #define SPT_PMC_MPHY_CORE_STS_0 0x1143 - #define SPT_PMC_MPHY_CORE_STS_1 0x1142 - #define SPT_PMC_MPHY_COM_STS_0 0x1155 -@@ -135,6 +136,9 @@ enum ppfear_regs { - #define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2) - #define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3) - -+#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13) -+#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22) -+ - /* Cannonlake Power Management Controller register offsets */ - #define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4 - #define CNP_PMC_PM_CFG_OFFSET 0x1818 -@@ -217,6 +221,7 @@ struct pmc_reg_map { - const int pm_read_disable_bit; - const u32 slps0_dbg_offset; - const u32 ltr_ignore_max; -+ const u32 pm_vric1_offset; - }; - - /** -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c -index 7e35ce2162d0..503fda4e7e8e 100644 ---- a/drivers/scsi/qla2xxx/qla_os.c -+++ b/drivers/scsi/qla2xxx/qla_os.c -@@ -1459,7 +1459,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, - goto eh_reset_failed; - } - err = 2; -- if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1) -+ if (do_reset(fcport, cmd->device->lun, 1) - != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x800c, - "do_reset failed for cmd=%p.\n", cmd); -diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c -index 5a6e8e12701a..655ad26106e4 100644 ---- a/drivers/scsi/scsi_lib.c -+++ b/drivers/scsi/scsi_lib.c -@@ -598,9 +598,16 @@ static bool scsi_end_request(struct request *req, blk_status_t error, - if (!blk_rq_is_scsi(req)) { - WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); - cmd->flags &= ~SCMD_INITIALIZED; -- destroy_rcu_head(&cmd->rcu); - } - -+ /* -+ * Calling rcu_barrier() is not necessary here because the -+ * SCSI error handler guarantees that the function called by -+ * call_rcu() has been called before scsi_end_request() is -+ * called. -+ */ -+ destroy_rcu_head(&cmd->rcu); -+ - /* - * In the MQ case the command gets freed by __blk_mq_end_request, - * so we have to do all cleanup that depends on it earlier. 
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c -index 0508831d6fb9..0a82e93566dc 100644 ---- a/drivers/scsi/scsi_transport_iscsi.c -+++ b/drivers/scsi/scsi_transport_iscsi.c -@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session) - scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); - /* flush running scans then delete devices */ - flush_work(&session->scan_work); -+ /* flush running unbind operations */ -+ flush_work(&session->unbind_work); - __iscsi_unbind_session(&session->unbind_work); - - /* hw iscsi may not have removed all connections from session */ -diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c -index 720760cd493f..ba39647a690c 100644 ---- a/drivers/thermal/broadcom/bcm2835_thermal.c -+++ b/drivers/thermal/broadcom/bcm2835_thermal.c -@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = { - - static void bcm2835_thermal_debugfs(struct platform_device *pdev) - { -- struct thermal_zone_device *tz = platform_get_drvdata(pdev); -- struct bcm2835_thermal_data *data = tz->devdata; -+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); - struct debugfs_regset32 *regset; - - data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL); -@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) - - data->tz = tz; - -- platform_set_drvdata(pdev, tz); -+ platform_set_drvdata(pdev, data); - - /* - * Thermal_zone doesn't enable hwmon as default, -@@ -290,8 +289,8 @@ err_clk: - - static int bcm2835_thermal_remove(struct platform_device *pdev) - { -- struct thermal_zone_device *tz = platform_get_drvdata(pdev); -- struct bcm2835_thermal_data *data = tz->devdata; -+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); -+ struct thermal_zone_device *tz = data->tz; - - debugfs_remove_recursive(data->debugfsdir); - thermal_zone_of_sensor_unregister(&pdev->dev, tz); -diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c -index 61ca7ce3624e..5f3ed24e26ec 100644 ---- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c -+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c -@@ -22,6 +22,13 @@ enum int3400_thermal_uuid { - INT3400_THERMAL_PASSIVE_1, - INT3400_THERMAL_ACTIVE, - INT3400_THERMAL_CRITICAL, -+ INT3400_THERMAL_ADAPTIVE_PERFORMANCE, -+ INT3400_THERMAL_EMERGENCY_CALL_MODE, -+ INT3400_THERMAL_PASSIVE_2, -+ INT3400_THERMAL_POWER_BOSS, -+ INT3400_THERMAL_VIRTUAL_SENSOR, -+ INT3400_THERMAL_COOLING_MODE, -+ INT3400_THERMAL_HARDWARE_DUTY_CYCLING, - INT3400_THERMAL_MAXIMUM_UUID, - }; - -@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { - "42A441D6-AE6A-462b-A84B-4A8CE79027D3", - "3A95C389-E4B8-4629-A526-C52C88626BAE", - "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", -+ "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D", -+ "5349962F-71E6-431D-9AE8-0A635B710AEE", -+ "9E04115A-AE87-4D1C-9500-0F3E340BFE75", -+ "F5A35014-C209-46A4-993A-EB56DE7530A1", -+ "6ED722A7-9240-48A5-B479-31EEF723D7CF", -+ "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531", -+ "BE84BABF-C4D4-403D-B495-3128FD44dAC1", - }; - - struct int3400_thermal_priv { -@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev) - - platform_set_drvdata(pdev, priv); - -- if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { -- int3400_thermal_ops.get_mode = int3400_thermal_get_mode; -- int3400_thermal_ops.set_mode = 
int3400_thermal_set_mode; -- } -+ int3400_thermal_ops.get_mode = int3400_thermal_get_mode; -+ int3400_thermal_ops.set_mode = int3400_thermal_set_mode; -+ - priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, - priv, &int3400_thermal_ops, - &int3400_thermal_params, 0, 0); -diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c -index 7571f7c2e7c9..ac7256b5f020 100644 ---- a/drivers/thermal/intel/intel_powerclamp.c -+++ b/drivers/thermal/intel/intel_powerclamp.c -@@ -101,7 +101,7 @@ struct powerclamp_worker_data { - bool clamping; - }; - --static struct powerclamp_worker_data * __percpu worker_data; -+static struct powerclamp_worker_data __percpu *worker_data; - static struct thermal_cooling_device *cooling_dev; - static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu - * clamping kthread worker -@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu) - struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); - struct kthread_worker *worker; - -- worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu); -+ worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu); - if (IS_ERR(worker)) - return; - -diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c -index 48eef552cba4..fc9399d9c082 100644 ---- a/drivers/thermal/samsung/exynos_tmu.c -+++ b/drivers/thermal/samsung/exynos_tmu.c -@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp) - struct exynos_tmu_data *data = p; - int value, ret = 0; - -- if (!data || !data->tmu_read || !data->enabled) -+ if (!data || !data->tmu_read) - return -EINVAL; - else if (!data->enabled) - /* -diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c -index 07cad54b84f1..e8e125acd712 100644 ---- a/fs/cifs/cifsfs.c -+++ b/fs/cifs/cifsfs.c -@@ -1010,7 +1010,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off, - unsigned int xid; - int rc; - -- if (remap_flags & ~REMAP_FILE_ADVISORY) -+ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) - return -EINVAL; - - cifs_dbg(FYI, "clone range\n"); -diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c -index 924269cec135..e32c264e3adb 100644 ---- a/fs/cifs/smb2maperror.c -+++ b/fs/cifs/smb2maperror.c -@@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = { - {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO, - "STATUS_UNFINISHED_CONTEXT_DELETED"}, - {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"}, -- {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"}, -+ /* Note that ENOATTTR and ENODATA are the same errno */ -+ {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"}, - {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"}, - {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO, - "STATUS_WRONG_CREDENTIAL_HANDLE"}, -diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c -index b29f711ab965..ea56b1cdbdde 100644 ---- a/fs/cifs/smb2ops.c -+++ b/fs/cifs/smb2ops.c -@@ -949,6 +949,16 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, - resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; - memset(rsp_iov, 0, sizeof(rsp_iov)); - -+ if (ses->server->ops->query_all_EAs) { -+ if (!ea_value) { -+ rc = ses->server->ops->query_all_EAs(xid, tcon, path, -+ ea_name, NULL, 0, -+ cifs_sb); -+ if (rc == -ENODATA) -+ goto sea_exit; -+ } -+ } -+ - /* Open */ - memset(&open_iov, 0, sizeof(open_iov)); - rqst[0].rq_iov = open_iov; -diff --git a/fs/cifs/trace.h 
b/fs/cifs/trace.h -index 59be48206932..b49bc925fb4f 100644 ---- a/fs/cifs/trace.h -+++ b/fs/cifs/trace.h -@@ -378,19 +378,19 @@ DECLARE_EVENT_CLASS(smb3_tcon_class, - __field(unsigned int, xid) - __field(__u32, tid) - __field(__u64, sesid) -- __field(const char *, unc_name) -+ __string(name, unc_name) - __field(int, rc) - ), - TP_fast_assign( - __entry->xid = xid; - __entry->tid = tid; - __entry->sesid = sesid; -- __entry->unc_name = unc_name; -+ __assign_str(name, unc_name); - __entry->rc = rc; - ), - TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d", - __entry->xid, __entry->sesid, __entry->tid, -- __entry->unc_name, __entry->rc) -+ __get_str(name), __entry->rc) - ) - - #define DEFINE_SMB3_TCON_EVENT(name) \ -diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c -index 2e76fb55d94a..5f24fdc140ad 100644 ---- a/fs/ext4/ioctl.c -+++ b/fs/ext4/ioctl.c -@@ -999,6 +999,13 @@ resizefs_out: - if (!blk_queue_discard(q)) - return -EOPNOTSUPP; - -+ /* -+ * We haven't replayed the journal, so we cannot use our -+ * block-bitmap-guided storage zapping commands. -+ */ -+ if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) -+ return -EROFS; -+ - if (copy_from_user(&range, (struct fstrim_range __user *)arg, - sizeof(range))) - return -EFAULT; -diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c -index 3d9b18505c0c..e7ae26e36c9c 100644 ---- a/fs/ext4/resize.c -+++ b/fs/ext4/resize.c -@@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb, - memcpy(n_group_desc, o_group_desc, - EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); - n_group_desc[gdb_num] = gdb_bh; -+ -+ BUFFER_TRACE(gdb_bh, "get_write_access"); -+ err = ext4_journal_get_write_access(handle, gdb_bh); -+ if (err) { -+ kvfree(n_group_desc); -+ brelse(gdb_bh); -+ return err; -+ } -+ - EXT4_SB(sb)->s_group_desc = n_group_desc; - EXT4_SB(sb)->s_gdb_count++; - kvfree(o_group_desc); -- BUFFER_TRACE(gdb_bh, "get_write_access"); -- err = ext4_journal_get_write_access(handle, gdb_bh); - return err; - } - -@@ -2073,6 +2080,10 @@ out: - free_flex_gd(flex_gd); - if (resize_inode != NULL) - iput(resize_inode); -- ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count); -+ if (err) -+ ext4_warning(sb, "error (%d) occurred during " -+ "file system resize", err); -+ ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", -+ ext4_blocks_count(es)); - return err; - } -diff --git a/fs/ext4/super.c b/fs/ext4/super.c -index fb12d3c17c1b..b9bca7298f96 100644 ---- a/fs/ext4/super.c -+++ b/fs/ext4/super.c -@@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) - spin_unlock(&sbi->s_md_lock); - } - -+static bool system_going_down(void) -+{ -+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF -+ || system_state == SYSTEM_RESTART; -+} -+ - /* Deal with the reporting of failure conditions on a filesystem such as - * inconsistencies detected or read IO failures. - * -@@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb) - if (journal) - jbd2_journal_abort(journal, -EIO); - } -- if (test_opt(sb, ERRORS_RO)) { -+ /* -+ * We force ERRORS_RO behavior when system is rebooting. Otherwise we -+ * could panic during 'reboot -f' as the underlying device got already -+ * disabled. 
-+ */ -+ if (test_opt(sb, ERRORS_RO) || system_going_down()) { - ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); - /* - * Make sure updated value of ->s_mount_flags will be visible -@@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb) - */ - smp_wmb(); - sb->s_flags |= SB_RDONLY; -- } -- if (test_opt(sb, ERRORS_PANIC)) { -+ } else if (test_opt(sb, ERRORS_PANIC)) { - if (EXT4_SB(sb)->s_journal && - !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) - return; -diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c -index f955cd3e0677..7743fa83b895 100644 ---- a/fs/f2fs/checkpoint.c -+++ b/fs/f2fs/checkpoint.c -@@ -306,8 +306,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping, - goto skip_write; - - /* collect a number of dirty meta pages and write together */ -- if (wbc->for_kupdate || -- get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META)) -+ if (wbc->sync_mode != WB_SYNC_ALL && -+ get_pages(sbi, F2FS_DIRTY_META) < -+ nr_pages_to_skip(sbi, META)) - goto skip_write; - - /* if locked failed, cp will flush dirty pages instead */ -@@ -405,7 +406,7 @@ static int f2fs_set_meta_page_dirty(struct page *page) - if (!PageDirty(page)) { - __set_page_dirty_nobuffers(page); - inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META); -- SetPagePrivate(page); -+ f2fs_set_page_private(page, 0); - f2fs_trace_pid(page); - return 1; - } -@@ -956,7 +957,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page) - inode_inc_dirty_pages(inode); - spin_unlock(&sbi->inode_lock[type]); - -- SetPagePrivate(page); -+ f2fs_set_page_private(page, 0); - f2fs_trace_pid(page); - } - -diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c -index f91d8630c9a2..c99aab23efea 100644 ---- a/fs/f2fs/data.c -+++ b/fs/f2fs/data.c -@@ -2711,8 +2711,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset, - if (IS_ATOMIC_WRITTEN_PAGE(page)) - return f2fs_drop_inmem_page(inode, page); - -- set_page_private(page, 0); -- ClearPagePrivate(page); -+ f2fs_clear_page_private(page); - } - - int f2fs_release_page(struct page *page, gfp_t wait) -@@ -2726,8 +2725,7 @@ int f2fs_release_page(struct page *page, gfp_t wait) - return 0; - - clear_cold_data(page); -- set_page_private(page, 0); -- ClearPagePrivate(page); -+ f2fs_clear_page_private(page); - return 1; - } - -@@ -2795,12 +2793,8 @@ int f2fs_migrate_page(struct address_space *mapping, - return -EAGAIN; - } - -- /* -- * A reference is expected if PagePrivate set when move mapping, -- * however F2FS breaks this for maintaining dirty page counts when -- * truncating pages. So here adjusting the 'extra_count' make it work. -- */ -- extra_count = (atomic_written ? 1 : 0) - page_has_private(page); -+ /* one extra reference was held for atomic_write page */ -+ extra_count = atomic_written ? 
1 : 0; - rc = migrate_page_move_mapping(mapping, newpage, - page, mode, extra_count); - if (rc != MIGRATEPAGE_SUCCESS) { -@@ -2821,9 +2815,10 @@ int f2fs_migrate_page(struct address_space *mapping, - get_page(newpage); - } - -- if (PagePrivate(page)) -- SetPagePrivate(newpage); -- set_page_private(newpage, page_private(page)); -+ if (PagePrivate(page)) { -+ f2fs_set_page_private(newpage, page_private(page)); -+ f2fs_clear_page_private(page); -+ } - - if (mode != MIGRATE_SYNC_NO_COPY) - migrate_page_copy(newpage, page); -diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c -index 50d0d36280fa..99a6063c2327 100644 ---- a/fs/f2fs/dir.c -+++ b/fs/f2fs/dir.c -@@ -728,7 +728,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, - !f2fs_truncate_hole(dir, page->index, page->index + 1)) { - f2fs_clear_page_cache_dirty_tag(page); - clear_page_dirty_for_io(page); -- ClearPagePrivate(page); -+ f2fs_clear_page_private(page); - ClearPageUptodate(page); - clear_cold_data(page); - inode_dec_dirty_pages(dir); -diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h -index 279bc00489cc..6d9186a6528c 100644 ---- a/fs/f2fs/f2fs.h -+++ b/fs/f2fs/f2fs.h -@@ -2825,6 +2825,27 @@ static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi, - return true; - } - -+static inline void f2fs_set_page_private(struct page *page, -+ unsigned long data) -+{ -+ if (PagePrivate(page)) -+ return; -+ -+ get_page(page); -+ SetPagePrivate(page); -+ set_page_private(page, data); -+} -+ -+static inline void f2fs_clear_page_private(struct page *page) -+{ -+ if (!PagePrivate(page)) -+ return; -+ -+ set_page_private(page, 0); -+ ClearPagePrivate(page); -+ f2fs_put_page(page, 0); -+} -+ - /* - * file.c - */ -diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c -index ae2b45e75847..30ed43bce110 100644 ---- a/fs/f2fs/file.c -+++ b/fs/f2fs/file.c -@@ -768,7 +768,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) - { - struct inode *inode = d_inode(dentry); - int err; -- bool size_changed = false; - - if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) - return -EIO; -@@ -843,8 +842,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) - down_write(&F2FS_I(inode)->i_sem); - F2FS_I(inode)->last_disk_size = i_size_read(inode); - up_write(&F2FS_I(inode)->i_sem); -- -- size_changed = true; - } - - __setattr_copy(inode, attr); -@@ -858,7 +855,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) - } - - /* file size may changed here */ -- f2fs_mark_inode_dirty_sync(inode, size_changed); -+ f2fs_mark_inode_dirty_sync(inode, true); - - /* inode change will produce dirty node pages flushed by checkpoint */ - f2fs_balance_fs(F2FS_I_SB(inode), true); -diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c -index 4f450e573312..3f99ab288695 100644 ---- a/fs/f2fs/node.c -+++ b/fs/f2fs/node.c -@@ -1920,7 +1920,9 @@ static int f2fs_write_node_pages(struct address_space *mapping, - f2fs_balance_fs_bg(sbi); - - /* collect a number of dirty node pages and write together */ -- if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE)) -+ if (wbc->sync_mode != WB_SYNC_ALL && -+ get_pages(sbi, F2FS_DIRTY_NODES) < -+ nr_pages_to_skip(sbi, NODE)) - goto skip_write; - - if (wbc->sync_mode == WB_SYNC_ALL) -@@ -1959,7 +1961,7 @@ static int f2fs_set_node_page_dirty(struct page *page) - if (!PageDirty(page)) { - __set_page_dirty_nobuffers(page); - inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); -- SetPagePrivate(page); -+ f2fs_set_page_private(page, 0); - f2fs_trace_pid(page); - return 1; - } -diff --git a/fs/f2fs/segment.c 
b/fs/f2fs/segment.c -index e1b1d390b329..b6c8b0696ef6 100644 ---- a/fs/f2fs/segment.c -+++ b/fs/f2fs/segment.c -@@ -191,8 +191,7 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page) - - f2fs_trace_pid(page); - -- set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE); -- SetPagePrivate(page); -+ f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE); - - new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); - -@@ -280,8 +279,7 @@ next: - ClearPageUptodate(page); - clear_cold_data(page); - } -- set_page_private(page, 0); -- ClearPagePrivate(page); -+ f2fs_clear_page_private(page); - f2fs_put_page(page, 1); - - list_del(&cur->list); -@@ -370,8 +368,7 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page) - kmem_cache_free(inmem_entry_slab, cur); - - ClearPageUptodate(page); -- set_page_private(page, 0); -- ClearPagePrivate(page); -+ f2fs_clear_page_private(page); - f2fs_put_page(page, 0); - - trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE); -diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c -index 5892fa3c885f..144ffba3ec5a 100644 ---- a/fs/f2fs/super.c -+++ b/fs/f2fs/super.c -@@ -1460,9 +1460,16 @@ static int f2fs_enable_quotas(struct super_block *sb); - - static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) - { -+ unsigned int s_flags = sbi->sb->s_flags; - struct cp_control cpc; -- int err; -+ int err = 0; -+ int ret; - -+ if (s_flags & SB_RDONLY) { -+ f2fs_msg(sbi->sb, KERN_ERR, -+ "checkpoint=disable on readonly fs"); -+ return -EINVAL; -+ } - sbi->sb->s_flags |= SB_ACTIVE; - - f2fs_update_time(sbi, DISABLE_TIME); -@@ -1470,18 +1477,24 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) - while (!f2fs_time_over(sbi, DISABLE_TIME)) { - mutex_lock(&sbi->gc_mutex); - err = f2fs_gc(sbi, true, false, NULL_SEGNO); -- if (err == -ENODATA) -+ if (err == -ENODATA) { -+ err = 0; - break; -+ } - if (err && err != -EAGAIN) -- return err; -+ break; - } - -- err = sync_filesystem(sbi->sb); -- if (err) -- return err; -+ ret = sync_filesystem(sbi->sb); -+ if (ret || err) { -+ err = ret ? ret: err; -+ goto restore_flag; -+ } - -- if (f2fs_disable_cp_again(sbi)) -- return -EAGAIN; -+ if (f2fs_disable_cp_again(sbi)) { -+ err = -EAGAIN; -+ goto restore_flag; -+ } - - mutex_lock(&sbi->gc_mutex); - cpc.reason = CP_PAUSE; -@@ -1490,7 +1503,9 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) - - sbi->unusable_block_count = 0; - mutex_unlock(&sbi->gc_mutex); -- return 0; -+restore_flag: -+ sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */ -+ return err; - } - - static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) -@@ -3359,7 +3374,7 @@ skip_recovery: - if (test_opt(sbi, DISABLE_CHECKPOINT)) { - err = f2fs_disable_checkpoint(sbi); - if (err) -- goto free_meta; -+ goto sync_free_meta; - } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) { - f2fs_enable_checkpoint(sbi); - } -@@ -3372,7 +3387,7 @@ skip_recovery: - /* After POR, we can run background GC thread.*/ - err = f2fs_start_gc_thread(sbi); - if (err) -- goto free_meta; -+ goto sync_free_meta; - } - kvfree(options); - -@@ -3394,6 +3409,11 @@ skip_recovery: - f2fs_update_time(sbi, REQ_TIME); - return 0; - -+sync_free_meta: -+ /* safe to flush all the data */ -+ sync_filesystem(sbi->sb); -+ retry = false; -+ - free_meta: - #ifdef CONFIG_QUOTA - f2fs_truncate_quota_inode_pages(sb); -@@ -3407,6 +3427,8 @@ free_meta: - * falls into an infinite loop in f2fs_sync_meta_pages(). 
- */ - truncate_inode_pages_final(META_MAPPING(sbi)); -+ /* evict some inodes being cached by GC */ -+ evict_inodes(sb); - f2fs_unregister_sysfs(sbi); - free_root_inode: - dput(sb->s_root); -diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c -index 73b92985198b..6b6fe6431a64 100644 ---- a/fs/f2fs/xattr.c -+++ b/fs/f2fs/xattr.c -@@ -347,7 +347,7 @@ check: - *base_addr = txattr_addr; - return 0; - out: -- kzfree(txattr_addr); -+ kvfree(txattr_addr); - return err; - } - -@@ -390,7 +390,7 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage, - *base_addr = txattr_addr; - return 0; - fail: -- kzfree(txattr_addr); -+ kvfree(txattr_addr); - return err; - } - -@@ -517,7 +517,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name, - } - error = size; - out: -- kzfree(base_addr); -+ kvfree(base_addr); - return error; - } - -@@ -563,7 +563,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) - } - error = buffer_size - rest; - cleanup: -- kzfree(base_addr); -+ kvfree(base_addr); - return error; - } - -@@ -694,7 +694,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, - if (!error && S_ISDIR(inode->i_mode)) - set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP); - exit: -- kzfree(base_addr); -+ kvfree(base_addr); - return error; - } - -diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c -index 798f1253141a..3b7b8e95c98a 100644 ---- a/fs/notify/inotify/inotify_user.c -+++ b/fs/notify/inotify/inotify_user.c -@@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, - fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group); - if (!fsn_mark) - return -ENOENT; -- else if (create) -- return -EEXIST; -+ else if (create) { -+ ret = -EEXIST; -+ goto out; -+ } - - i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); - -@@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, - /* return the wd */ - ret = i_mark->wd; - -+out: - /* match the get from fsnotify_find_mark() */ - fsnotify_put_mark(fsn_mark); - -diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c -index bbcc185062bb..d29d869abec1 100644 ---- a/fs/proc/kcore.c -+++ b/fs/proc/kcore.c -@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head); - static DECLARE_RWSEM(kclist_lock); - static int kcore_need_update = 1; - -+/* -+ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error -+ * Same as oldmem_pfn_is_ram in vmcore -+ */ -+static int (*mem_pfn_is_ram)(unsigned long pfn); -+ -+int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)) -+{ -+ if (mem_pfn_is_ram) -+ return -EBUSY; -+ mem_pfn_is_ram = fn; -+ return 0; -+} -+ -+static int pfn_is_ram(unsigned long pfn) -+{ -+ if (mem_pfn_is_ram) -+ return mem_pfn_is_ram(pfn); -+ else -+ return 1; -+} -+ - /* This doesn't grab kclist_lock, so it should only be used at init time. 
*/ - void __init kclist_add(struct kcore_list *new, void *addr, size_t size, - int type) -@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) - goto out; - } - m = NULL; /* skip the list anchor */ -+ } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) { -+ if (clear_user(buffer, tsz)) { -+ ret = -EFAULT; -+ goto out; -+ } - } else if (m->type == KCORE_VMALLOC) { - vread(buf, (char *)start, tsz); - /* we have to zero-fill user buffer even if no read */ -diff --git a/include/linux/atalk.h b/include/linux/atalk.h -index 840cf92307ba..d5cfc0b15b76 100644 ---- a/include/linux/atalk.h -+++ b/include/linux/atalk.h -@@ -158,7 +158,7 @@ extern int sysctl_aarp_retransmit_limit; - extern int sysctl_aarp_resolve_time; - - #ifdef CONFIG_SYSCTL --extern void atalk_register_sysctl(void); -+extern int atalk_register_sysctl(void); - extern void atalk_unregister_sysctl(void); - #else - static inline int atalk_register_sysctl(void) -diff --git a/include/linux/kcore.h b/include/linux/kcore.h -index 8c3f8c14eeaa..c843f4a9c512 100644 ---- a/include/linux/kcore.h -+++ b/include/linux/kcore.h -@@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) - m->vaddr = (unsigned long)vaddr; - kclist_add(m, addr, sz, KCORE_REMAP); - } -+ -+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); - #else - static inline - void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) -diff --git a/include/linux/swap.h b/include/linux/swap.h -index 622025ac1461..f1146ed21062 100644 ---- a/include/linux/swap.h -+++ b/include/linux/swap.h -@@ -157,9 +157,9 @@ struct swap_extent { - /* - * Max bad pages in the new format.. - */ --#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x) - #define MAX_SWAP_BADPAGES \ -- ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)) -+ ((offsetof(union swap_header, magic.magic) - \ -+ offsetof(union swap_header, info.badpages)) / sizeof(int)) - - enum { - SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ -diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h -index 5b50fe4906d2..7b60fd186cfe 100644 ---- a/include/trace/events/rxrpc.h -+++ b/include/trace/events/rxrpc.h -@@ -76,6 +76,7 @@ enum rxrpc_client_trace { - rxrpc_client_chan_disconnect, - rxrpc_client_chan_pass, - rxrpc_client_chan_unstarted, -+ rxrpc_client_chan_wait_failed, - rxrpc_client_cleanup, - rxrpc_client_count, - rxrpc_client_discard, -@@ -276,6 +277,7 @@ enum rxrpc_tx_point { - EM(rxrpc_client_chan_disconnect, "ChDisc") \ - EM(rxrpc_client_chan_pass, "ChPass") \ - EM(rxrpc_client_chan_unstarted, "ChUnst") \ -+ EM(rxrpc_client_chan_wait_failed, "ChWtFl") \ - EM(rxrpc_client_cleanup, "Clean ") \ - EM(rxrpc_client_count, "Count ") \ - EM(rxrpc_client_discard, "Discar") \ -diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c -index 2ada5e21dfa6..4a8f390a2b82 100644 ---- a/kernel/bpf/inode.c -+++ b/kernel/bpf/inode.c -@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ - } - EXPORT_SYMBOL(bpf_prog_get_type_path); - --static void bpf_evict_inode(struct inode *inode) --{ -- enum bpf_type type; -- -- truncate_inode_pages_final(&inode->i_data); -- clear_inode(inode); -- -- if (S_ISLNK(inode->i_mode)) -- kfree(inode->i_link); -- if (!bpf_inode_type(inode, &type)) -- bpf_any_put(inode->i_private, type); --} -- - /* - * Display the mount options in /proc/mounts. 
- */ -@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root) - return 0; - } - -+static void bpf_destroy_inode_deferred(struct rcu_head *head) -+{ -+ struct inode *inode = container_of(head, struct inode, i_rcu); -+ enum bpf_type type; -+ -+ if (S_ISLNK(inode->i_mode)) -+ kfree(inode->i_link); -+ if (!bpf_inode_type(inode, &type)) -+ bpf_any_put(inode->i_private, type); -+ free_inode_nonrcu(inode); -+} -+ -+static void bpf_destroy_inode(struct inode *inode) -+{ -+ call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred); -+} -+ - static const struct super_operations bpf_super_ops = { - .statfs = simple_statfs, - .drop_inode = generic_delete_inode, - .show_options = bpf_show_options, -- .evict_inode = bpf_evict_inode, -+ .destroy_inode = bpf_destroy_inode, - }; - - enum { -diff --git a/kernel/events/core.c b/kernel/events/core.c -index 26d6edab051a..2e2305a81047 100644 ---- a/kernel/events/core.c -+++ b/kernel/events/core.c -@@ -7178,6 +7178,7 @@ static void perf_event_mmap_output(struct perf_event *event, - struct perf_output_handle handle; - struct perf_sample_data sample; - int size = mmap_event->event_id.header.size; -+ u32 type = mmap_event->event_id.header.type; - int ret; - - if (!perf_event_mmap_match(event, data)) -@@ -7221,6 +7222,7 @@ static void perf_event_mmap_output(struct perf_event *event, - perf_output_end(&handle); - out: - mmap_event->event_id.header.size = size; -+ mmap_event->event_id.header.type = type; - } - - static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 01a2489de94e..62cc29364fba 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -6942,7 +6942,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf, - { - char tok[21]; /* U64_MAX */ - -- if (!sscanf(buf, "%s %llu", tok, periodp)) -+ if (sscanf(buf, "%20s %llu", tok, periodp) < 1) - return -EINVAL; - - *periodp *= NSEC_PER_USEC; -diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c -index 033ec7c45f13..1ccf77f6d346 100644 ---- a/kernel/sched/cpufreq_schedutil.c -+++ b/kernel/sched/cpufreq_schedutil.c -@@ -48,10 +48,10 @@ struct sugov_cpu { - - bool iowait_boost_pending; - unsigned int iowait_boost; -- unsigned int iowait_boost_max; - u64 last_update; - - unsigned long bw_dl; -+ unsigned long min; - unsigned long max; - - /* The field below is for single-CPU policies only: */ -@@ -303,8 +303,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time, - if (delta_ns <= TICK_NSEC) - return false; - -- sg_cpu->iowait_boost = set_iowait_boost -- ? sg_cpu->sg_policy->policy->min : 0; -+ sg_cpu->iowait_boost = set_iowait_boost ? 
sg_cpu->min : 0; - sg_cpu->iowait_boost_pending = set_iowait_boost; - - return true; -@@ -344,14 +343,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, - - /* Double the boost at each request */ - if (sg_cpu->iowait_boost) { -- sg_cpu->iowait_boost <<= 1; -- if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max) -- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max; -+ sg_cpu->iowait_boost = -+ min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE); - return; - } - - /* First wakeup after IO: start with minimum boost */ -- sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min; -+ sg_cpu->iowait_boost = sg_cpu->min; - } - - /** -@@ -373,47 +371,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, - * This mechanism is designed to boost high frequently IO waiting tasks, while - * being more conservative on tasks which does sporadic IO operations. - */ --static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time, -- unsigned long *util, unsigned long *max) -+static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time, -+ unsigned long util, unsigned long max) - { -- unsigned int boost_util, boost_max; -+ unsigned long boost; - - /* No boost currently required */ - if (!sg_cpu->iowait_boost) -- return; -+ return util; - - /* Reset boost if the CPU appears to have been idle enough */ - if (sugov_iowait_reset(sg_cpu, time, false)) -- return; -+ return util; - -- /* -- * An IO waiting task has just woken up: -- * allow to further double the boost value -- */ -- if (sg_cpu->iowait_boost_pending) { -- sg_cpu->iowait_boost_pending = false; -- } else { -+ if (!sg_cpu->iowait_boost_pending) { - /* -- * Otherwise: reduce the boost value and disable it when we -- * reach the minimum. -+ * No boost pending; reduce the boost value. - */ - sg_cpu->iowait_boost >>= 1; -- if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) { -+ if (sg_cpu->iowait_boost < sg_cpu->min) { - sg_cpu->iowait_boost = 0; -- return; -+ return util; - } - } - -+ sg_cpu->iowait_boost_pending = false; -+ - /* -- * Apply the current boost value: a CPU is boosted only if its current -- * utilization is smaller then the current IO boost level. -+ * @util is already in capacity scale; convert iowait_boost -+ * into the same scale so we can compare. 
- */ -- boost_util = sg_cpu->iowait_boost; -- boost_max = sg_cpu->iowait_boost_max; -- if (*util * boost_max < *max * boost_util) { -- *util = boost_util; -- *max = boost_max; -- } -+ boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT; -+ return max(boost, util); - } - - #ifdef CONFIG_NO_HZ_COMMON -@@ -460,7 +449,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, - - util = sugov_get_util(sg_cpu); - max = sg_cpu->max; -- sugov_iowait_apply(sg_cpu, time, &util, &max); -+ util = sugov_iowait_apply(sg_cpu, time, util, max); - next_f = get_next_freq(sg_policy, util, max); - /* - * Do not reduce the frequency if the CPU has not been idle -@@ -500,7 +489,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) - - j_util = sugov_get_util(j_sg_cpu); - j_max = j_sg_cpu->max; -- sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max); -+ j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max); - - if (j_util * max > j_max * util) { - util = j_util; -@@ -837,7 +826,9 @@ static int sugov_start(struct cpufreq_policy *policy) - memset(sg_cpu, 0, sizeof(*sg_cpu)); - sg_cpu->cpu = cpu; - sg_cpu->sg_policy = sg_policy; -- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; -+ sg_cpu->min = -+ (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) / -+ policy->cpuinfo.max_freq; - } - - for_each_cpu(cpu, policy->cpus) { -diff --git a/lib/div64.c b/lib/div64.c -index 01c8602bb6ff..ee146bb4c558 100644 ---- a/lib/div64.c -+++ b/lib/div64.c -@@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) - quot = div_u64_rem(dividend, divisor, &rem32); - *remainder = rem32; - } else { -- int n = 1 + fls(high); -+ int n = fls(high); - quot = div_u64(dividend >> n, divisor >> n); - - if (quot != 0) -@@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor) - if (high == 0) { - quot = div_u64(dividend, divisor); - } else { -- int n = 1 + fls(high); -+ int n = fls(high); - quot = div_u64(dividend >> n, divisor >> n); - - if (quot != 0) -diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c -index 8006295f8bd7..dda73991bb54 100644 ---- a/net/appletalk/atalk_proc.c -+++ b/net/appletalk/atalk_proc.c -@@ -255,7 +255,7 @@ out_interface: - goto out; - } - --void __exit atalk_proc_exit(void) -+void atalk_proc_exit(void) - { - remove_proc_entry("interface", atalk_proc_dir); - remove_proc_entry("route", atalk_proc_dir); -diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c -index 9b6bc5abe946..795fbc6c06aa 100644 ---- a/net/appletalk/ddp.c -+++ b/net/appletalk/ddp.c -@@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst = - /* Called by proto.c on kernel start up */ - static int __init atalk_init(void) - { -- int rc = proto_register(&ddp_proto, 0); -+ int rc; - -- if (rc != 0) -+ rc = proto_register(&ddp_proto, 0); -+ if (rc) - goto out; - -- (void)sock_register(&atalk_family_ops); -+ rc = sock_register(&atalk_family_ops); -+ if (rc) -+ goto out_proto; -+ - ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv); - if (!ddp_dl) - printk(atalk_err_snap); -@@ -1923,12 +1927,33 @@ static int __init atalk_init(void) - dev_add_pack(<alk_packet_type); - dev_add_pack(&ppptalk_packet_type); - -- register_netdevice_notifier(&ddp_notifier); -+ rc = register_netdevice_notifier(&ddp_notifier); -+ if (rc) -+ goto out_sock; -+ - aarp_proto_init(); -- atalk_proc_init(); -- atalk_register_sysctl(); -+ rc = atalk_proc_init(); -+ if (rc) -+ goto out_aarp; -+ -+ rc = atalk_register_sysctl(); -+ if (rc) -+ goto out_proc; - out: - 
return rc; -+out_proc: -+ atalk_proc_exit(); -+out_aarp: -+ aarp_cleanup_module(); -+ unregister_netdevice_notifier(&ddp_notifier); -+out_sock: -+ dev_remove_pack(&ppptalk_packet_type); -+ dev_remove_pack(<alk_packet_type); -+ unregister_snap_client(ddp_dl); -+ sock_unregister(PF_APPLETALK); -+out_proto: -+ proto_unregister(&ddp_proto); -+ goto out; - } - module_init(atalk_init); - -diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c -index c744a853fa5f..d945b7c0176d 100644 ---- a/net/appletalk/sysctl_net_atalk.c -+++ b/net/appletalk/sysctl_net_atalk.c -@@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = { - - static struct ctl_table_header *atalk_table_header; - --void atalk_register_sysctl(void) -+int __init atalk_register_sysctl(void) - { - atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table); -+ if (!atalk_table_header) -+ return -ENOMEM; -+ return 0; - } - - void atalk_unregister_sysctl(void) -diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c -index 5cf6d9f4761d..83797b3949e2 100644 ---- a/net/rxrpc/conn_client.c -+++ b/net/rxrpc/conn_client.c -@@ -704,6 +704,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, - - ret = rxrpc_wait_for_channel(call, gfp); - if (ret < 0) { -+ trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed); - rxrpc_disconnect_client_call(call); - goto out; - } -@@ -774,16 +775,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet) - */ - void rxrpc_disconnect_client_call(struct rxrpc_call *call) - { -- unsigned int channel = call->cid & RXRPC_CHANNELMASK; - struct rxrpc_connection *conn = call->conn; -- struct rxrpc_channel *chan = &conn->channels[channel]; -+ struct rxrpc_channel *chan = NULL; - struct rxrpc_net *rxnet = conn->params.local->rxnet; -+ unsigned int channel = -1; -+ u32 cid; - -+ spin_lock(&conn->channel_lock); -+ -+ cid = call->cid; -+ if (cid) { -+ channel = cid & RXRPC_CHANNELMASK; -+ chan = &conn->channels[channel]; -+ } - trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); - call->conn = NULL; - -- spin_lock(&conn->channel_lock); -- - /* Calls that have never actually been assigned a channel can simply be - * discarded. If the conn didn't get used either, it will follow - * immediately unless someone else grabs it in the meantime. -@@ -807,7 +814,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call) - goto out; - } - -- ASSERTCMP(rcu_access_pointer(chan->call), ==, call); -+ if (rcu_access_pointer(chan->call) != call) { -+ spin_unlock(&conn->channel_lock); -+ BUG(); -+ } - - /* If a client call was exposed to the world, we save the result for - * retransmission. 
-diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h -index 5b02bd49fde4..4e4ecc21760b 100644 ---- a/sound/drivers/opl3/opl3_voice.h -+++ b/sound/drivers/opl3/opl3_voice.h -@@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t); - - /* Prototypes for opl3_drums.c */ - void snd_opl3_load_drums(struct snd_opl3 *opl3); --void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan); -+void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan); - - /* Prototypes for opl3_oss.c */ - #if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS) -diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c -index d77dcba276b5..1eb8b61a185b 100644 ---- a/sound/isa/sb/sb8.c -+++ b/sound/isa/sb/sb8.c -@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev) - - /* block the 0x388 port to avoid PnP conflicts */ - acard->fm_res = request_region(0x388, 4, "SoundBlaster FM"); -+ if (!acard->fm_res) { -+ err = -EBUSY; -+ goto _err; -+ } - - if (port[dev] != SNDRV_AUTO_PORT) { - if ((err = snd_sbdsp_create(card, port[dev], irq[dev], -diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c -index 907cf1a46712..3ef2b27ebbe8 100644 ---- a/sound/pci/echoaudio/echoaudio.c -+++ b/sound/pci/echoaudio/echoaudio.c -@@ -1954,6 +1954,11 @@ static int snd_echo_create(struct snd_card *card, - } - chip->dsp_registers = (volatile u32 __iomem *) - ioremap_nocache(chip->dsp_registers_phys, sz); -+ if (!chip->dsp_registers) { -+ dev_err(chip->card->dev, "ioremap failed\n"); -+ snd_echo_free(chip); -+ return -ENOMEM; -+ } - - if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED, - KBUILD_MODNAME, chip)) { -diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c -index 169e347c76f6..9ba1a2e1ed7a 100644 ---- a/tools/lib/bpf/libbpf.c -+++ b/tools/lib/bpf/libbpf.c -@@ -627,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags) - bool strict = !(flags & MAPS_RELAX_COMPAT); - int i, map_idx, map_def_sz, nr_maps = 0; - Elf_Scn *scn; -- Elf_Data *data; -+ Elf_Data *data = NULL; - Elf_Data *symbols = obj->efile.symbols; - - if (obj->efile.maps_shndx < 0) -diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt -index 4ac7775fbc11..4851285ba00c 100644 ---- a/tools/perf/Documentation/perf-config.txt -+++ b/tools/perf/Documentation/perf-config.txt -@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this: - - [report] - # Defaults -- sort-order = comm,dso,symbol -+ sort_order = comm,dso,symbol - percent-limit = 0 - queue-size = 0 - children = true -diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt -index 4bc2085e5197..39c05f89104e 100644 ---- a/tools/perf/Documentation/perf-stat.txt -+++ b/tools/perf/Documentation/perf-stat.txt -@@ -72,9 +72,8 @@ report:: - --all-cpus:: - system-wide collection from all CPUs (default if no target is specified) - ---c:: ----scale:: -- scale/normalize counter values -+--no-scale:: -+ Don't scale/normalize counter values - - -d:: - --detailed:: -diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c -index 0c0a6e824934..2af067859966 100644 ---- a/tools/perf/bench/epoll-ctl.c -+++ b/tools/perf/bench/epoll-ctl.c -@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu) - pthread_attr_t thread_attr, *attrp = NULL; - cpu_set_t cpuset; - unsigned int i, j; -- int ret; -+ int ret = 0; - - if 
(!noaffinity) - pthread_attr_init(&thread_attr); -diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c -index 5a11534e96a0..fe85448abd45 100644 ---- a/tools/perf/bench/epoll-wait.c -+++ b/tools/perf/bench/epoll-wait.c -@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu) - pthread_attr_t thread_attr, *attrp = NULL; - cpu_set_t cpuset; - unsigned int i, j; -- int ret, events = EPOLLIN; -+ int ret = 0, events = EPOLLIN; - - if (oneshot) - events |= EPOLLONESHOT; -diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c -index 63a3afc7f32b..a52295dbad2b 100644 ---- a/tools/perf/builtin-stat.c -+++ b/tools/perf/builtin-stat.c -@@ -728,7 +728,8 @@ static struct option stat_options[] = { - "system-wide collection from all CPUs"), - OPT_BOOLEAN('g', "group", &group, - "put the counters into a counter group"), -- OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"), -+ OPT_BOOLEAN(0, "scale", &stat_config.scale, -+ "Use --no-scale to disable counter scaling for multiplexing"), - OPT_INCR('v', "verbose", &verbose, - "be more verbose (show counter open errors, etc)"), - OPT_INTEGER('r', "repeat", &stat_config.run_count, -diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c -index f64e312db787..616408251e25 100644 ---- a/tools/perf/builtin-top.c -+++ b/tools/perf/builtin-top.c -@@ -1633,8 +1633,9 @@ int cmd_top(int argc, const char **argv) - annotation_config__init(); - - symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); -- if (symbol__init(NULL) < 0) -- return -1; -+ status = symbol__init(NULL); -+ if (status < 0) -+ goto out_delete_evlist; - - sort__setup_elide(stdout); - -diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c -index 6d598cc071ae..1a9c3becf5ff 100644 ---- a/tools/perf/tests/backward-ring-buffer.c -+++ b/tools/perf/tests/backward-ring-buffer.c -@@ -18,7 +18,7 @@ static void testcase(void) - int i; - - for (i = 0; i < NR_ITERS; i++) { -- char proc_name[10]; -+ char proc_name[15]; - - snprintf(proc_name, sizeof(proc_name), "p:%d\n", i); - prctl(PR_SET_NAME, proc_name); -diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c -index ea7acf403727..71f60c0f9faa 100644 ---- a/tools/perf/tests/evsel-tp-sched.c -+++ b/tools/perf/tests/evsel-tp-sched.c -@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes - if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) - ret = -1; - -+ perf_evsel__delete(evsel); - return ret; - } -diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c -index 01f0706995a9..9acc1e80b936 100644 ---- a/tools/perf/tests/expr.c -+++ b/tools/perf/tests/expr.c -@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused) - const char *p; - const char **other; - double val; -- int ret; -+ int i, ret; - struct parse_ctx ctx; - int num_other; - -@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused) - TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ")); - TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO")); - TEST_ASSERT_VAL("find other", other[3] == NULL); -+ -+ for (i = 0; i < num_other; i++) -+ free((void *)other[i]); - free((void *)other); - - return 0; -diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c -index c531e6deb104..493ecb611540 100644 ---- a/tools/perf/tests/openat-syscall-all-cpus.c -+++ 
b/tools/perf/tests/openat-syscall-all-cpus.c -@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int - if (IS_ERR(evsel)) { - tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat"); - pr_debug("%s\n", errbuf); -- goto out_thread_map_delete; -+ goto out_cpu_map_delete; - } - - if (perf_evsel__open(evsel, cpus, threads) < 0) { -@@ -119,6 +119,8 @@ out_close_fd: - perf_evsel__close_fd(evsel); - out_evsel_delete: - perf_evsel__delete(evsel); -+out_cpu_map_delete: -+ cpu_map__put(cpus); - out_thread_map_delete: - thread_map__put(threads); - return err; -diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c -index 04b1d53e4bf9..1d352621bd48 100644 ---- a/tools/perf/util/build-id.c -+++ b/tools/perf/util/build-id.c -@@ -183,6 +183,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size) - return bf; - } - -+/* The caller is responsible to free the returned buffer. */ - char *build_id_cache__origname(const char *sbuild_id) - { - char *linkname; -diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c -index 1ea8f898f1a1..9ecdbd5986b3 100644 ---- a/tools/perf/util/config.c -+++ b/tools/perf/util/config.c -@@ -632,11 +632,10 @@ static int collect_config(const char *var, const char *value, - } - - ret = set_value(item, value); -- return ret; - - out_free: - free(key); -- return -1; -+ return ret; - } - - int perf_config_set__collect(struct perf_config_set *set, const char *file_name, -diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c -index dbc0466db368..50c933044f88 100644 ---- a/tools/perf/util/evsel.c -+++ b/tools/perf/util/evsel.c -@@ -1289,6 +1289,7 @@ void perf_evsel__exit(struct perf_evsel *evsel) - { - assert(list_empty(&evsel->node)); - assert(evsel->evlist == NULL); -+ perf_evsel__free_counts(evsel); - perf_evsel__free_fd(evsel); - perf_evsel__free_id(evsel); - perf_evsel__free_config_terms(evsel); -@@ -1341,8 +1342,7 @@ void perf_counts_values__scale(struct perf_counts_values *count, - scaled = 1; - count->val = (u64)((double) count->val * count->ena / count->run + 0.5); - } -- } else -- count->ena = count->run = 0; -+ } - - if (pscaled) - *pscaled = scaled; -diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c -index 8aad8330e392..e416e76f5600 100644 ---- a/tools/perf/util/hist.c -+++ b/tools/perf/util/hist.c -@@ -1048,8 +1048,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, - - err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, - iter->evsel, al, max_stack_depth); -- if (err) -+ if (err) { -+ map__put(alm); - return err; -+ } - - err = iter->ops->prepare_entry(iter, al); - if (err) -diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c -index 6751301a755c..2b37f56f0549 100644 ---- a/tools/perf/util/map.c -+++ b/tools/perf/util/map.c -@@ -571,10 +571,25 @@ static void __maps__purge(struct maps *maps) - } - } - -+static void __maps__purge_names(struct maps *maps) -+{ -+ struct rb_root *root = &maps->names; -+ struct rb_node *next = rb_first(root); -+ -+ while (next) { -+ struct map *pos = rb_entry(next, struct map, rb_node_name); -+ -+ next = rb_next(&pos->rb_node_name); -+ rb_erase_init(&pos->rb_node_name, root); -+ map__put(pos); -+ } -+} -+ - static void maps__exit(struct maps *maps) - { - down_write(&maps->lock); - __maps__purge(maps); -+ __maps__purge_names(maps); - up_write(&maps->lock); - } - -@@ -911,6 +926,9 @@ static void __maps__remove(struct maps *maps, 
struct map *map) - { - rb_erase_init(&map->rb_node, &maps->entries); - map__put(map); -+ -+ rb_erase_init(&map->rb_node_name, &maps->names); -+ map__put(map); - } - - void maps__remove(struct maps *maps, struct map *map) -diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c -index ea523d3b248f..989fed6f43b5 100644 ---- a/tools/perf/util/ordered-events.c -+++ b/tools/perf/util/ordered-events.c -@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how, - "FINAL", - "ROUND", - "HALF ", -+ "TOP ", -+ "TIME ", - }; - int err; - bool show_progress = false; -diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c -index 920e1e6551dd..03860313313c 100644 ---- a/tools/perf/util/parse-events.c -+++ b/tools/perf/util/parse-events.c -@@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config) - perf_evsel__delete(evsel); - } - -+ thread_map__put(tmap); - return ret; - } - -@@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob, - printf(" %-50s [%s]\n", buf, "SDT event"); - free(buf); - } -+ free(path); - } else - printf(" %-50s [%s]\n", nd->s, "SDT event"); - if (nd2) { -diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c -index 4d40515307b8..2856cc9d5a31 100644 ---- a/tools/perf/util/stat.c -+++ b/tools/perf/util/stat.c -@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel - break; - case AGGR_GLOBAL: - aggr->val += count->val; -- if (config->scale) { -- aggr->ena += count->ena; -- aggr->run += count->run; -- } -+ aggr->ena += count->ena; -+ aggr->run += count->run; - case AGGR_UNSET: - default: - break; -@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel, - struct perf_event_attr *attr = &evsel->attr; - struct perf_evsel *leader = evsel->leader; - -- if (config->scale) { -- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | -- PERF_FORMAT_TOTAL_TIME_RUNNING; -- } -+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | -+ PERF_FORMAT_TOTAL_TIME_RUNNING; - - /* - * The event is part of non trivial group, let's enable -diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c -index 9327c0ddc3a5..c3fad065c89c 100644 ---- a/tools/power/x86/turbostat/turbostat.c -+++ b/tools/power/x86/turbostat/turbostat.c -@@ -5077,6 +5077,9 @@ int fork_it(char **argv) - signal(SIGQUIT, SIG_IGN); - if (waitpid(child_pid, &status, 0) == -1) - err(status, "waitpid"); -+ -+ if (WIFEXITED(status)) -+ status = WEXITSTATUS(status); - } - /* - * n.b. 
fork_it() does not check for errors from for_all_cpus() diff --git a/patch/kernel/sunxi-dev/patch-5.0.9-10.patch b/patch/kernel/sunxi-dev/patch-5.0.9-10.patch new file mode 100644 index 000000000..0659014b3 --- /dev/null +++ b/patch/kernel/sunxi-dev/patch-5.0.9-10.patch @@ -0,0 +1,4117 @@ +diff --git a/Makefile b/Makefile +index ef192ca04330..b282c4143b21 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 0 +-SUBLEVEL = 9 ++SUBLEVEL = 10 + EXTRAVERSION = + NAME = Shy Crocodile + +@@ -678,8 +678,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) + KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) + + ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE +-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) +-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) ++KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) + else + ifdef CONFIG_PROFILE_ALL_BRANCHES + KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) +diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h +index e1d95f08f8e1..c7e1a7837706 100644 +--- a/arch/arm64/include/asm/futex.h ++++ b/arch/arm64/include/asm/futex.h +@@ -50,7 +50,7 @@ do { \ + static inline int + arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) + { +- int oldval, ret, tmp; ++ int oldval = 0, ret, tmp; + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); + + pagefault_disable(); +diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c +index 4cb771ba13fa..5d316fe40480 100644 +--- a/arch/s390/boot/mem_detect.c ++++ b/arch/s390/boot/mem_detect.c +@@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void) + { + unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64)); + +- if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE && ++ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE && + INITRD_START < offset + ENTRIES_EXTENDED_MAX) + offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64)); + +diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S +index 3b6e70d085da..8457cdd47f75 100644 +--- a/arch/x86/crypto/poly1305-avx2-x86_64.S ++++ b/arch/x86/crypto/poly1305-avx2-x86_64.S +@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2) + vpaddq t2,t1,t1 + vmovq t1x,d4 + ++ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> ++ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small ++ # amount. Careful: we must not assume the carry bits 'd0 >> 26', ++ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit ++ # integers. It's true in a single-block implementation, but not here. 
++ + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax +@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2) + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax +- lea (%eax,%eax,4),%eax +- add %eax,%ebx ++ lea (%rax,%rax,4),%rax ++ add %rax,%rbx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 +- mov %ebx,%eax +- shr $26,%eax ++ mov %rbx,%rax ++ shr $26,%rax + add %eax,h1 + # h0 = h0 & 0x3ffffff + andl $0x3ffffff,%ebx +diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S +index c88c670cb5fc..5851c7418fb7 100644 +--- a/arch/x86/crypto/poly1305-sse2-x86_64.S ++++ b/arch/x86/crypto/poly1305-sse2-x86_64.S +@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2) + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax +- lea (%eax,%eax,4),%eax +- add %eax,%ebx ++ lea (%rax,%rax,4),%rax ++ add %rax,%rbx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 +- mov %ebx,%eax +- shr $26,%eax ++ mov %rbx,%rax ++ shr $26,%rax + add %eax,h1 + # h0 = h0 & 0x3ffffff + andl $0x3ffffff,%ebx +@@ -520,6 +520,12 @@ ENTRY(poly1305_2block_sse2) + paddq t2,t1 + movq t1,d4 + ++ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> ++ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small ++ # amount. Careful: we must not assume the carry bits 'd0 >> 26', ++ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit ++ # integers. It's true in a single-block implementation, but not here. ++ + # d1 += d0 >> 26 + mov d0,%rax + shr $26,%rax +@@ -558,16 +564,16 @@ ENTRY(poly1305_2block_sse2) + # h0 += (d4 >> 26) * 5 + mov d4,%rax + shr $26,%rax +- lea (%eax,%eax,4),%eax +- add %eax,%ebx ++ lea (%rax,%rax,4),%rax ++ add %rax,%rbx + # h4 = d4 & 0x3ffffff + mov d4,%rax + and $0x3ffffff,%eax + mov %eax,h4 + + # h1 += h0 >> 26 +- mov %ebx,%eax +- shr $26,%eax ++ mov %rbx,%rax ++ shr $26,%rax + add %eax,h1 + # h0 = h0 & 0x3ffffff + andl $0x3ffffff,%ebx +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c +index 0ecfac84ba91..d45f3fbd232e 100644 +--- a/arch/x86/events/amd/core.c ++++ b/arch/x86/events/amd/core.c +@@ -117,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids + }; + + /* +- * AMD Performance Monitor K7 and later. 
++ * AMD Performance Monitor K7 and later, up to and including Family 16h: + */ + static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] = + { +- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, +- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, +- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, +- [PERF_COUNT_HW_CACHE_MISSES] = 0x077e, +- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, +- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, +- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ +- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ ++ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, ++ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, ++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, ++ [PERF_COUNT_HW_CACHE_MISSES] = 0x077e, ++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, ++ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, ++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ ++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ ++}; ++ ++/* ++ * AMD Performance Monitor Family 17h and later: ++ */ ++static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] = ++{ ++ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, ++ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, ++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60, ++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, ++ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, ++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287, ++ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187, + }; + + static u64 amd_pmu_event_map(int hw_event) + { ++ if (boot_cpu_data.x86 >= 0x17) ++ return amd_f17h_perfmon_event_map[hw_event]; ++ + return amd_perfmon_event_map[hw_event]; + } + +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index 2480feb07df3..470d7daa915d 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -3130,7 +3130,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) + flags &= ~PERF_SAMPLE_TIME; + if (!event->attr.exclude_kernel) + flags &= ~PERF_SAMPLE_REGS_USER; +- if (event->attr.sample_regs_user & ~PEBS_REGS) ++ if (event->attr.sample_regs_user & ~PEBS_GP_REGS) + flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); + return flags; + } +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h +index acd72e669c04..b68ab65454ff 100644 +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -96,25 +96,25 @@ struct amd_nb { + PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \ + PERF_SAMPLE_PERIOD) + +-#define PEBS_REGS \ +- (PERF_REG_X86_AX | \ +- PERF_REG_X86_BX | \ +- PERF_REG_X86_CX | \ +- PERF_REG_X86_DX | \ +- PERF_REG_X86_DI | \ +- PERF_REG_X86_SI | \ +- PERF_REG_X86_SP | \ +- PERF_REG_X86_BP | \ +- PERF_REG_X86_IP | \ +- PERF_REG_X86_FLAGS | \ +- PERF_REG_X86_R8 | \ +- PERF_REG_X86_R9 | \ +- PERF_REG_X86_R10 | \ +- PERF_REG_X86_R11 | \ +- PERF_REG_X86_R12 | \ +- PERF_REG_X86_R13 | \ +- PERF_REG_X86_R14 | \ +- PERF_REG_X86_R15) ++#define PEBS_GP_REGS \ ++ ((1ULL << PERF_REG_X86_AX) | \ ++ (1ULL << PERF_REG_X86_BX) | \ ++ (1ULL << PERF_REG_X86_CX) | \ ++ (1ULL << PERF_REG_X86_DX) | \ ++ (1ULL << PERF_REG_X86_DI) | \ ++ (1ULL << PERF_REG_X86_SI) | \ ++ (1ULL << PERF_REG_X86_SP) | \ ++ (1ULL << PERF_REG_X86_BP) | \ ++ (1ULL << PERF_REG_X86_IP) | \ ++ (1ULL << PERF_REG_X86_FLAGS) | \ ++ (1ULL << PERF_REG_X86_R8) | \ ++ (1ULL << PERF_REG_X86_R9) | \ ++ (1ULL << PERF_REG_X86_R10) | \ ++ (1ULL << PERF_REG_X86_R11) | \ ++ (1ULL << PERF_REG_X86_R12) | \ ++ (1ULL << PERF_REG_X86_R13) | \ ++ (1ULL << PERF_REG_X86_R14) | 
\ ++ (1ULL << PERF_REG_X86_R15)) + + /* + * Per register state. +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 01874d54f4fd..482383c2b184 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -275,7 +275,7 @@ static const struct { + const char *option; + enum spectre_v2_user_cmd cmd; + bool secure; +-} v2_user_options[] __initdata = { ++} v2_user_options[] __initconst = { + { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, + { "off", SPECTRE_V2_USER_CMD_NONE, false }, + { "on", SPECTRE_V2_USER_CMD_FORCE, true }, +@@ -419,7 +419,7 @@ static const struct { + const char *option; + enum spectre_v2_mitigation_cmd cmd; + bool secure; +-} mitigation_options[] __initdata = { ++} mitigation_options[] __initconst = { + { "off", SPECTRE_V2_CMD_NONE, false }, + { "on", SPECTRE_V2_CMD_FORCE, true }, + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, +@@ -658,7 +658,7 @@ static const char * const ssb_strings[] = { + static const struct { + const char *option; + enum ssb_mitigation_cmd cmd; +-} ssb_mitigation_options[] __initdata = { ++} ssb_mitigation_options[] __initconst = { + { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ + { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ + { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 4ba75afba527..f4b954ff5b89 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) + unsigned long *sara = stack_addr(regs); + + ri->ret_addr = (kprobe_opcode_t *) *sara; ++ ri->fp = sara; + + /* Replace the return addr with trampoline addr */ + *sara = (unsigned long) &kretprobe_trampoline; +@@ -748,26 +749,48 @@ asm( + NOKPROBE_SYMBOL(kretprobe_trampoline); + STACK_FRAME_NON_STANDARD(kretprobe_trampoline); + ++static struct kprobe kretprobe_kprobe = { ++ .addr = (void *)kretprobe_trampoline, ++}; ++ + /* + * Called from kretprobe_trampoline + */ + static __used void *trampoline_handler(struct pt_regs *regs) + { ++ struct kprobe_ctlblk *kcb; + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *tmp; + unsigned long flags, orig_ret_address = 0; + unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + kprobe_opcode_t *correct_ret_addr = NULL; ++ void *frame_pointer; ++ bool skipped = false; ++ ++ preempt_disable(); ++ ++ /* ++ * Set a dummy kprobe for avoiding kretprobe recursion. ++ * Since kretprobe never run in kprobe handler, kprobe must not ++ * be running at this point. ++ */ ++ kcb = get_kprobe_ctlblk(); ++ __this_cpu_write(current_kprobe, &kretprobe_kprobe); ++ kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + /* fixup registers */ + #ifdef CONFIG_X86_64 + regs->cs = __KERNEL_CS; ++ /* On x86-64, we use pt_regs->sp for return address holder. */ ++ frame_pointer = ®s->sp; + #else + regs->cs = __KERNEL_CS | get_kernel_rpl(); + regs->gs = 0; ++ /* On x86-32, we use pt_regs->flags for return address holder. 
*/ ++ frame_pointer = ®s->flags; + #endif + regs->ip = trampoline_address; + regs->orig_ax = ~0UL; +@@ -789,8 +812,25 @@ static __used void *trampoline_handler(struct pt_regs *regs) + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; ++ /* ++ * Return probes must be pushed on this hash list correct ++ * order (same as return order) so that it can be poped ++ * correctly. However, if we find it is pushed it incorrect ++ * order, this means we find a function which should not be ++ * probed, because the wrong order entry is pushed on the ++ * path of processing other kretprobe itself. ++ */ ++ if (ri->fp != frame_pointer) { ++ if (!skipped) ++ pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n"); ++ skipped = true; ++ continue; ++ } + + orig_ret_address = (unsigned long)ri->ret_addr; ++ if (skipped) ++ pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n", ++ ri->rp->kp.addr); + + if (orig_ret_address != trampoline_address) + /* +@@ -808,14 +848,15 @@ static __used void *trampoline_handler(struct pt_regs *regs) + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; ++ if (ri->fp != frame_pointer) ++ continue; + + orig_ret_address = (unsigned long)ri->ret_addr; + if (ri->rp && ri->rp->handler) { + __this_cpu_write(current_kprobe, &ri->rp->kp); +- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; + ri->ret_addr = correct_ret_addr; + ri->rp->handler(ri, regs); +- __this_cpu_write(current_kprobe, NULL); ++ __this_cpu_write(current_kprobe, &kretprobe_kprobe); + } + + recycle_rp_inst(ri, &empty_rp); +@@ -831,6 +872,9 @@ static __used void *trampoline_handler(struct pt_regs *regs) + + kretprobe_hash_unlock(current, &flags); + ++ __this_cpu_write(current_kprobe, NULL); ++ preempt_enable(); ++ + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index 90ae0ca51083..9db049f06f2f 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -414,6 +414,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, + u64 msr = x86_spec_ctrl_base; + bool updmsr = false; + ++ lockdep_assert_irqs_disabled(); ++ + /* + * If TIF_SSBD is different, select the proper mitigation + * method. Note that if SSBD mitigation is disabled or permanentely +@@ -465,10 +467,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) + + void speculation_ctrl_update(unsigned long tif) + { ++ unsigned long flags; ++ + /* Forced update. Make sure all relevant TIF flags are different */ +- preempt_disable(); ++ local_irq_save(flags); + __speculation_ctrl_update(~tif, tif); +- preempt_enable(); ++ local_irq_restore(flags); + } + + /* Called from seccomp/prctl update */ +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index c338984c850d..81be2165821f 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2575,15 +2575,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) + * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU + * supports long mode. + */ +- cr4 = ctxt->ops->get_cr(ctxt, 4); + if (emulator_has_longmode(ctxt)) { + struct desc_struct cs_desc; + + /* Zero CR4.PCIDE before CR0.PG. 
*/ +- if (cr4 & X86_CR4_PCIDE) { ++ cr4 = ctxt->ops->get_cr(ctxt, 4); ++ if (cr4 & X86_CR4_PCIDE) + ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); +- cr4 &= ~X86_CR4_PCIDE; +- } + + /* A 32-bit code segment is required to clear EFER.LMA. */ + memset(&cs_desc, 0, sizeof(cs_desc)); +@@ -2597,13 +2595,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) + if (cr0 & X86_CR0_PE) + ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); + +- /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */ +- if (cr4 & X86_CR4_PAE) +- ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); ++ if (emulator_has_longmode(ctxt)) { ++ /* Clear CR4.PAE before clearing EFER.LME. */ ++ cr4 = ctxt->ops->get_cr(ctxt, 4); ++ if (cr4 & X86_CR4_PAE) ++ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); + +- /* And finally go back to 32-bit mode. */ +- efer = 0; +- ctxt->ops->set_msr(ctxt, MSR_EFER, efer); ++ /* And finally go back to 32-bit mode. */ ++ efer = 0; ++ ctxt->ops->set_msr(ctxt, MSR_EFER, efer); ++ } + + smbase = ctxt->ops->get_smbase(ctxt); + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index a9b8e38d78ad..516c1de03d47 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -2687,6 +2687,7 @@ static int npf_interception(struct vcpu_svm *svm) + static int db_interception(struct vcpu_svm *svm) + { + struct kvm_run *kvm_run = svm->vcpu.run; ++ struct kvm_vcpu *vcpu = &svm->vcpu; + + if (!(svm->vcpu.guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && +@@ -2697,6 +2698,8 @@ static int db_interception(struct vcpu_svm *svm) + + if (svm->nmi_singlestep) { + disable_nmi_singlestep(svm); ++ /* Make sure we check for pending NMIs upon entry */ ++ kvm_make_request(KVM_REQ_EVENT, vcpu); + } + + if (svm->vcpu.guest_debug & +@@ -4512,14 +4515,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm) + kvm_lapic_reg_write(apic, APIC_ICR, icrl); + break; + case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { ++ int i; ++ struct kvm_vcpu *vcpu; ++ struct kvm *kvm = svm->vcpu.kvm; + struct kvm_lapic *apic = svm->vcpu.arch.apic; + + /* +- * Update ICR high and low, then emulate sending IPI, +- * which is handled when writing APIC_ICR. ++ * At this point, we expect that the AVIC HW has already ++ * set the appropriate IRR bits on the valid target ++ * vcpus. So, we just need to kick the appropriate vcpu. 
+ */ +- kvm_lapic_reg_write(apic, APIC_ICR2, icrh); +- kvm_lapic_reg_write(apic, APIC_ICR, icrl); ++ kvm_for_each_vcpu(i, vcpu, kvm) { ++ bool m = kvm_apic_match_dest(vcpu, apic, ++ icrl & KVM_APIC_SHORT_MASK, ++ GET_APIC_DEST_FIELD(icrh), ++ icrl & KVM_APIC_DEST_MASK); ++ ++ if (m && !avic_vcpu_is_running(vcpu)) ++ kvm_vcpu_wake_up(vcpu); ++ } + break; + } + case AVIC_IPI_FAILURE_INVALID_TARGET: +@@ -5620,6 +5634,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + svm->vmcb->save.cr2 = vcpu->arch.cr2; + + clgi(); ++ kvm_load_guest_xcr0(vcpu); + + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if +@@ -5765,6 +5780,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) + kvm_before_interrupt(&svm->vcpu); + ++ kvm_put_guest_xcr0(vcpu); + stgi(); + + /* Any pending NMI will happen here */ +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index a0a770816429..34499081022c 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -6548,6 +6548,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vmx_set_interrupt_shadow(vcpu, 0); + ++ kvm_load_guest_xcr0(vcpu); ++ + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && + vcpu->arch.pkru != vmx->host_pkru) +@@ -6635,6 +6637,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) + __write_pkru(vmx->host_pkru); + } + ++ kvm_put_guest_xcr0(vcpu); ++ + vmx->nested.nested_run_pending = 0; + vmx->idt_vectoring_info = 0; + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 7ee802a92bc8..2db58067bb59 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) + } + EXPORT_SYMBOL_GPL(kvm_lmsw); + +-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) ++void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) + { + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + !vcpu->guest_xcr0_loaded) { +@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) + vcpu->guest_xcr0_loaded = 1; + } + } ++EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0); + +-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) ++void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) + { + if (vcpu->guest_xcr0_loaded) { + if (vcpu->arch.xcr0 != host_xcr0) +@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) + vcpu->guest_xcr0_loaded = 0; + } + } ++EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0); + + static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) + { +@@ -7856,8 +7858,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + goto cancel_injection; + } + +- kvm_load_guest_xcr0(vcpu); +- + if (req_immediate_exit) { + kvm_make_request(KVM_REQ_EVENT, vcpu); + kvm_x86_ops->request_immediate_exit(vcpu); +@@ -7910,8 +7910,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + vcpu->mode = OUTSIDE_GUEST_MODE; + smp_wmb(); + +- kvm_put_guest_xcr0(vcpu); +- + kvm_before_interrupt(vcpu); + kvm_x86_ops->handle_external_intr(vcpu); + kvm_after_interrupt(vcpu); +diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h +index 20ede17202bf..de3d46769ee3 100644 +--- a/arch/x86/kvm/x86.h ++++ b/arch/x86/kvm/x86.h +@@ -347,4 +347,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu) + __this_cpu_write(current_vcpu, NULL); + } + ++void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu); ++void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu); + #endif +diff --git a/crypto/testmgr.h b/crypto/testmgr.h +index 
ca8e8ebef309..db496aa360a3 100644 +--- a/crypto/testmgr.h ++++ b/crypto/testmgr.h +@@ -5706,7 +5706,49 @@ static const struct hash_testvec poly1305_tv_template[] = { + .psize = 80, + .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", +- }, ++ }, { /* Regression test for overflow in AVX2 implementation */ ++ .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff\xff\xff\xff\xff" ++ "\xff\xff\xff\xff", ++ .psize = 300, ++ .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8" ++ "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1", ++ } + }; + + /* NHPoly1305 test vectors from https://github.com/google/adiantum */ +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c +index f75f8f870ce3..4be4dc3e8aa6 100644 +--- a/drivers/acpi/nfit/core.c ++++ b/drivers/acpi/nfit/core.c +@@ -1319,19 +1319,30 @@ static ssize_t scrub_show(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct nvdimm_bus_descriptor *nd_desc; ++ struct acpi_nfit_desc *acpi_desc; + ssize_t rc = -ENXIO; ++ bool busy; + + device_lock(dev); + nd_desc = dev_get_drvdata(dev); +- if (nd_desc) { +- struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); ++ if (!nd_desc) { ++ device_unlock(dev); ++ return rc; ++ } ++ acpi_desc = to_acpi_desc(nd_desc); + +- mutex_lock(&acpi_desc->init_mutex); +- rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, +- acpi_desc->scrub_busy +- && !acpi_desc->cancel ? "+\n" : "\n"); +- mutex_unlock(&acpi_desc->init_mutex); ++ mutex_lock(&acpi_desc->init_mutex); ++ busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) ++ && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); ++ rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? 
"+\n" : "\n"); ++ /* Allow an admin to poll the busy state at a higher rate */ ++ if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL, ++ &acpi_desc->scrub_flags)) { ++ acpi_desc->scrub_tmo = 1; ++ mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ); + } ++ ++ mutex_unlock(&acpi_desc->init_mutex); + device_unlock(dev); + return rc; + } +@@ -2650,7 +2661,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, + + if (rc < 0) + return rc; +- return cmd_rc; ++ if (cmd_rc < 0) ++ return cmd_rc; ++ set_bit(ARS_VALID, &acpi_desc->scrub_flags); ++ return 0; + } + + static int ars_continue(struct acpi_nfit_desc *acpi_desc) +@@ -2660,11 +2674,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc) + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; + +- memset(&ars_start, 0, sizeof(ars_start)); +- ars_start.address = ars_status->restart_address; +- ars_start.length = ars_status->restart_length; +- ars_start.type = ars_status->type; +- ars_start.flags = acpi_desc->ars_start_flags; ++ ars_start = (struct nd_cmd_ars_start) { ++ .address = ars_status->restart_address, ++ .length = ars_status->restart_length, ++ .type = ars_status->type, ++ }; + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, + sizeof(ars_start), &cmd_rc); + if (rc < 0) +@@ -2743,6 +2757,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) + */ + if (ars_status->out_length < 44) + return 0; ++ ++ /* ++ * Ignore potentially stale results that are only refreshed ++ * after a start-ARS event. ++ */ ++ if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) { ++ dev_dbg(acpi_desc->dev, "skip %d stale records\n", ++ ars_status->num_records); ++ return 0; ++ } ++ + for (i = 0; i < ars_status->num_records; i++) { + /* only process full records */ + if (ars_status->out_length +@@ -3081,7 +3106,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, + + lockdep_assert_held(&acpi_desc->init_mutex); + +- if (acpi_desc->cancel) ++ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) + return 0; + + if (query_rc == -EBUSY) { +@@ -3155,7 +3180,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) + { + lockdep_assert_held(&acpi_desc->init_mutex); + +- acpi_desc->scrub_busy = 1; ++ set_bit(ARS_BUSY, &acpi_desc->scrub_flags); + /* note this should only be set from within the workqueue */ + if (tmo) + acpi_desc->scrub_tmo = tmo; +@@ -3171,7 +3196,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) + { + lockdep_assert_held(&acpi_desc->init_mutex); + +- acpi_desc->scrub_busy = 0; ++ clear_bit(ARS_BUSY, &acpi_desc->scrub_flags); + acpi_desc->scrub_count++; + if (acpi_desc->scrub_count_state) + sysfs_notify_dirent(acpi_desc->scrub_count_state); +@@ -3192,6 +3217,7 @@ static void acpi_nfit_scrub(struct work_struct *work) + else + notify_ars_done(acpi_desc); + memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); ++ clear_bit(ARS_POLL, &acpi_desc->scrub_flags); + mutex_unlock(&acpi_desc->init_mutex); + } + +@@ -3226,6 +3252,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) + struct nfit_spa *nfit_spa; + int rc; + ++ set_bit(ARS_VALID, &acpi_desc->scrub_flags); + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + switch (nfit_spa_type(nfit_spa->spa)) { + case NFIT_SPA_VOLATILE: +@@ -3460,7 +3487,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, + struct nfit_spa *nfit_spa; + + 
mutex_lock(&acpi_desc->init_mutex); +- if (acpi_desc->cancel) { ++ if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) { + mutex_unlock(&acpi_desc->init_mutex); + return 0; + } +@@ -3539,7 +3566,7 @@ void acpi_nfit_shutdown(void *data) + mutex_unlock(&acpi_desc_lock); + + mutex_lock(&acpi_desc->init_mutex); +- acpi_desc->cancel = 1; ++ set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); + cancel_delayed_work_sync(&acpi_desc->dwork); + mutex_unlock(&acpi_desc->init_mutex); + +diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h +index 33691aecfcee..0cbe5009eb2c 100644 +--- a/drivers/acpi/nfit/nfit.h ++++ b/drivers/acpi/nfit/nfit.h +@@ -210,6 +210,13 @@ struct nfit_mem { + int family; + }; + ++enum scrub_flags { ++ ARS_BUSY, ++ ARS_CANCEL, ++ ARS_VALID, ++ ARS_POLL, ++}; ++ + struct acpi_nfit_desc { + struct nvdimm_bus_descriptor nd_desc; + struct acpi_table_header acpi_header; +@@ -223,7 +230,6 @@ struct acpi_nfit_desc { + struct list_head idts; + struct nvdimm_bus *nvdimm_bus; + struct device *dev; +- u8 ars_start_flags; + struct nd_cmd_ars_status *ars_status; + struct nfit_spa *scrub_spa; + struct delayed_work dwork; +@@ -232,8 +238,7 @@ struct acpi_nfit_desc { + unsigned int max_ars; + unsigned int scrub_count; + unsigned int scrub_mode; +- unsigned int scrub_busy:1; +- unsigned int cancel:1; ++ unsigned long scrub_flags; + unsigned long dimm_cmd_force_en; + unsigned long bus_cmd_force_en; + unsigned long bus_nfit_cmd_force_en; +diff --git a/drivers/base/memory.c b/drivers/base/memory.c +index 048cbf7d5233..23125f276ff1 100644 +--- a/drivers/base/memory.c ++++ b/drivers/base/memory.c +@@ -505,7 +505,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr, + + ret = lock_device_hotplug_sysfs(); + if (ret) +- goto out; ++ return ret; + + nid = memory_add_physaddr_to_nid(phys_addr); + ret = __add_memory(nid, phys_addr, +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index c518659b4d9f..ff9dd9adf803 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -214,6 +214,9 @@ struct ipmi_user { + + /* Does this interface receive IPMI events? */ + bool gets_events; ++ ++ /* Free must run in process context for RCU cleanup. */ ++ struct work_struct remove_work; + }; + + static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) +@@ -1079,6 +1082,15 @@ static int intf_err_seq(struct ipmi_smi *intf, + } + + ++static void free_user_work(struct work_struct *work) ++{ ++ struct ipmi_user *user = container_of(work, struct ipmi_user, ++ remove_work); ++ ++ cleanup_srcu_struct(&user->release_barrier); ++ kfree(user); ++} ++ + int ipmi_create_user(unsigned int if_num, + const struct ipmi_user_hndl *handler, + void *handler_data, +@@ -1122,6 +1134,8 @@ int ipmi_create_user(unsigned int if_num, + goto out_kfree; + + found: ++ INIT_WORK(&new_user->remove_work, free_user_work); ++ + rv = init_srcu_struct(&new_user->release_barrier); + if (rv) + goto out_kfree; +@@ -1184,8 +1198,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info); + static void free_user(struct kref *ref) + { + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); +- cleanup_srcu_struct(&user->release_barrier); +- kfree(user); ++ ++ /* SRCU cleanup must happen in task context. 
*/ ++ schedule_work(&user->remove_work); + } + + static void _ipmi_destroy_user(struct ipmi_user *user) +diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c +index 1b8fa9de2cac..41b9f6c92da7 100644 +--- a/drivers/char/tpm/eventlog/tpm2.c ++++ b/drivers/char/tpm/eventlog/tpm2.c +@@ -37,8 +37,8 @@ + * + * Returns size of the event. If it is an invalid event, returns 0. + */ +-static int calc_tpm2_event_size(struct tcg_pcr_event2 *event, +- struct tcg_pcr_event *event_header) ++static size_t calc_tpm2_event_size(struct tcg_pcr_event2 *event, ++ struct tcg_pcr_event *event_header) + { + struct tcg_efi_specid_event *efispecid; + struct tcg_event_field *event_field; +diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c +index 5eecad233ea1..744b0237300a 100644 +--- a/drivers/char/tpm/tpm-dev-common.c ++++ b/drivers/char/tpm/tpm-dev-common.c +@@ -203,12 +203,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait) + __poll_t mask = 0; + + poll_wait(file, &priv->async_wait, wait); ++ mutex_lock(&priv->buffer_mutex); + +- if (!priv->response_read || priv->response_length) ++ /* ++ * The response_length indicates if there is still response ++ * (or part of it) to be consumed. Partial reads decrease it ++ * by the number of bytes read, and write resets it the zero. ++ */ ++ if (priv->response_length) + mask = EPOLLIN | EPOLLRDNORM; + else + mask = EPOLLOUT | EPOLLWRNORM; + ++ mutex_unlock(&priv->buffer_mutex); + return mask; + } + +diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c +index 32a8e27c5382..cc4e642d3180 100644 +--- a/drivers/char/tpm/tpm_i2c_atmel.c ++++ b/drivers/char/tpm/tpm_i2c_atmel.c +@@ -69,6 +69,10 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) + if (status < 0) + return status; + ++ /* The upper layer does not support incomplete sends. 
*/ ++ if (status != len) ++ return -E2BIG; ++ + return 0; + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +index d0d966d6080a..1696644ec022 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } ++ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); + + tmp = mmVM_L2_CNTL4_DEFAULT; + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index f841accc2c00..f77c81db161b 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -730,7 +730,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, + } + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE +- if (!(flags & TTM_PAGE_FLAG_DMA32)) { ++ if (!(flags & TTM_PAGE_FLAG_DMA32) && ++ (npages - i) >= HPAGE_PMD_NR) { + for (j = 0; j < HPAGE_PMD_NR; ++j) + if (p++ != pages[i + j]) + break; +@@ -759,7 +760,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, + unsigned max_size, n2free; + + spin_lock_irqsave(&huge->lock, irq_flags); +- while (i < npages) { ++ while ((npages - i) >= HPAGE_PMD_NR) { + struct page *p = pages[i]; + unsigned j; + +diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c +index 2dc628d4f1ae..1412abcff010 100644 +--- a/drivers/i3c/master.c ++++ b/drivers/i3c/master.c +@@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, + { + struct i3c_dev_boardinfo *boardinfo; + struct device *dev = &master->dev; +- struct i3c_device_info info = { }; + enum i3c_addr_slot_status addrstatus; + u32 init_dyn_addr = 0; + +@@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, + + boardinfo->pid = ((u64)reg[1] << 32) | reg[2]; + +- if ((info.pid & GENMASK_ULL(63, 48)) || +- I3C_PID_RND_LOWER_32BITS(info.pid)) ++ if ((boardinfo->pid & GENMASK_ULL(63, 48)) || ++ I3C_PID_RND_LOWER_32BITS(boardinfo->pid)) + return -EINVAL; + + boardinfo->init_dyn_addr = init_dyn_addr; +diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c +index bb03079fbade..ec385fbfef4c 100644 +--- a/drivers/i3c/master/dw-i3c-master.c ++++ b/drivers/i3c/master/dw-i3c-master.c +@@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master) + + static void dw_i3c_master_disable(struct dw_i3c_master *master) + { +- writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE, ++ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE, + master->regs + DEVICE_CTRL); + } + +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c +index 7096e577b23f..50f3ff386bea 100644 +--- a/drivers/iio/accel/kxcjk-1013.c ++++ b/drivers/iio/accel/kxcjk-1013.c +@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev) + + mutex_lock(&data->mutex); + ret = kxcjk1013_set_mode(data, OPERATION); ++ if (ret == 0) ++ ret = kxcjk1013_set_range(data, data->range); + mutex_unlock(&data->mutex); + + return ret; +diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c +index ff5f2da2e1b1..54d9978b2740 100644 +--- a/drivers/iio/adc/ad_sigma_delta.c ++++ b/drivers/iio/adc/ad_sigma_delta.c +@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta, + if 
(sigma_delta->info->has_registers) { + data[0] = reg << sigma_delta->info->addr_shift; + data[0] |= sigma_delta->info->read_mask; ++ data[0] |= sigma_delta->comm; + spi_message_add_tail(&t[0], &m); + } + spi_message_add_tail(&t[1], &m); +diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c +index 75d2f73582a3..596841a3c4db 100644 +--- a/drivers/iio/adc/at91_adc.c ++++ b/drivers/iio/adc/at91_adc.c +@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev, + ret = wait_event_interruptible_timeout(st->wq_data_avail, + st->done, + msecs_to_jiffies(1000)); +- if (ret == 0) +- ret = -ETIMEDOUT; +- if (ret < 0) { +- mutex_unlock(&st->lock); +- return ret; +- } +- +- *val = st->last_value; + ++ /* Disable interrupts, regardless if adc conversion was ++ * successful or not ++ */ + at91_adc_writel(st, AT91_ADC_CHDR, + AT91_ADC_CH(chan->channel)); + at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel)); + +- st->last_value = 0; +- st->done = false; ++ if (ret > 0) { ++ /* a valid conversion took place */ ++ *val = st->last_value; ++ st->last_value = 0; ++ st->done = false; ++ ret = IIO_VAL_INT; ++ } else if (ret == 0) { ++ /* conversion timeout */ ++ dev_err(&idev->dev, "ADC Channel %d timeout.\n", ++ chan->channel); ++ ret = -ETIMEDOUT; ++ } ++ + mutex_unlock(&st->lock); +- return IIO_VAL_INT; ++ return ret; + + case IIO_CHAN_INFO_SCALE: + *val = st->vref_mv; +diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h +index 0ae89b87e2d6..4edc5d21cb9f 100644 +--- a/drivers/iio/chemical/bme680.h ++++ b/drivers/iio/chemical/bme680.h +@@ -2,11 +2,9 @@ + #ifndef BME680_H_ + #define BME680_H_ + +-#define BME680_REG_CHIP_I2C_ID 0xD0 +-#define BME680_REG_CHIP_SPI_ID 0x50 ++#define BME680_REG_CHIP_ID 0xD0 + #define BME680_CHIP_ID_VAL 0x61 +-#define BME680_REG_SOFT_RESET_I2C 0xE0 +-#define BME680_REG_SOFT_RESET_SPI 0x60 ++#define BME680_REG_SOFT_RESET 0xE0 + #define BME680_CMD_SOFTRESET 0xB6 + #define BME680_REG_STATUS 0x73 + #define BME680_SPI_MEM_PAGE_BIT BIT(4) +diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c +index 70c1fe4366f4..ccde4c65ff93 100644 +--- a/drivers/iio/chemical/bme680_core.c ++++ b/drivers/iio/chemical/bme680_core.c +@@ -63,9 +63,23 @@ struct bme680_data { + s32 t_fine; + }; + ++static const struct regmap_range bme680_volatile_ranges[] = { ++ regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB), ++ regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS), ++ regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG), ++}; ++ ++static const struct regmap_access_table bme680_volatile_table = { ++ .yes_ranges = bme680_volatile_ranges, ++ .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges), ++}; ++ + const struct regmap_config bme680_regmap_config = { + .reg_bits = 8, + .val_bits = 8, ++ .max_register = 0xef, ++ .volatile_table = &bme680_volatile_table, ++ .cache_type = REGCACHE_RBTREE, + }; + EXPORT_SYMBOL(bme680_regmap_config); + +@@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data, + s64 var1, var2, var3; + s16 calc_temp; + ++ /* If the calibration is invalid, attempt to reload it */ ++ if (!calib->par_t2) ++ bme680_read_calib(data, calib); ++ + var1 = (adc_temp >> 3) - (calib->par_t1 << 1); + var2 = (var1 * calib->par_t2) >> 11; + var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; +@@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data) + return ret; + } + +-static int bme680_read_temp(struct bme680_data *data, +- int *val, int *val2) ++static int 
bme680_read_temp(struct bme680_data *data, int *val) + { + struct device *dev = regmap_get_device(data->regmap); + int ret; +@@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data, + * compensate_press/compensate_humid to get compensated + * pressure/humidity readings. + */ +- if (val && val2) { +- *val = comp_temp; +- *val2 = 100; +- return IIO_VAL_FRACTIONAL; ++ if (val) { ++ *val = comp_temp * 10; /* Centidegrees to millidegrees */ ++ return IIO_VAL_INT; + } + + return ret; +@@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data, + s32 adc_press; + + /* Read and compensate temperature to get a reading of t_fine */ +- ret = bme680_read_temp(data, NULL, NULL); ++ ret = bme680_read_temp(data, NULL); + if (ret < 0) + return ret; + +@@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data, + u32 comp_humidity; + + /* Read and compensate temperature to get a reading of t_fine */ +- ret = bme680_read_temp(data, NULL, NULL); ++ ret = bme680_read_temp(data, NULL); + if (ret < 0) + return ret; + +@@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev, + case IIO_CHAN_INFO_PROCESSED: + switch (chan->type) { + case IIO_TEMP: +- return bme680_read_temp(data, val, val2); ++ return bme680_read_temp(data, val); + case IIO_PRESSURE: + return bme680_read_press(data, val, val2); + case IIO_HUMIDITYRELATIVE: +@@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, + { + struct iio_dev *indio_dev; + struct bme680_data *data; ++ unsigned int val; + int ret; + ++ ret = regmap_write(regmap, BME680_REG_SOFT_RESET, ++ BME680_CMD_SOFTRESET); ++ if (ret < 0) { ++ dev_err(dev, "Failed to reset chip\n"); ++ return ret; ++ } ++ ++ ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val); ++ if (ret < 0) { ++ dev_err(dev, "Error reading chip ID\n"); ++ return ret; ++ } ++ ++ if (val != BME680_CHIP_ID_VAL) { ++ dev_err(dev, "Wrong chip ID, got %x expected %x\n", ++ val, BME680_CHIP_ID_VAL); ++ return -ENODEV; ++ } ++ + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; +diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c +index 06d4be539d2e..cfc4449edf1b 100644 +--- a/drivers/iio/chemical/bme680_i2c.c ++++ b/drivers/iio/chemical/bme680_i2c.c +@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client, + { + struct regmap *regmap; + const char *name = NULL; +- unsigned int val; +- int ret; + + regmap = devm_regmap_init_i2c(client, &bme680_regmap_config); + if (IS_ERR(regmap)) { +@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client, + return PTR_ERR(regmap); + } + +- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C, +- BME680_CMD_SOFTRESET); +- if (ret < 0) { +- dev_err(&client->dev, "Failed to reset chip\n"); +- return ret; +- } +- +- ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val); +- if (ret < 0) { +- dev_err(&client->dev, "Error reading I2C chip ID\n"); +- return ret; +- } +- +- if (val != BME680_CHIP_ID_VAL) { +- dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n", +- val, BME680_CHIP_ID_VAL); +- return -ENODEV; +- } +- + if (id) + name = id->name; + +diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c +index c9fb05e8d0b9..881778e55d38 100644 +--- a/drivers/iio/chemical/bme680_spi.c ++++ b/drivers/iio/chemical/bme680_spi.c +@@ -11,28 +11,93 @@ + + #include "bme680.h" + ++struct bme680_spi_bus_context { ++ struct spi_device *spi; ++ u8 current_page; ++}; ++ ++/* ++ * 
In SPI mode there are only 7 address bits, a "page" register determines ++ * which part of the 8-bit range is active. This function looks at the address ++ * and writes the page selection bit if needed ++ */ ++static int bme680_regmap_spi_select_page( ++ struct bme680_spi_bus_context *ctx, u8 reg) ++{ ++ struct spi_device *spi = ctx->spi; ++ int ret; ++ u8 buf[2]; ++ u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */ ++ ++ if (page == ctx->current_page) ++ return 0; ++ ++ /* ++ * Data sheet claims we're only allowed to change bit 4, so we must do ++ * a read-modify-write on each and every page select ++ */ ++ buf[0] = BME680_REG_STATUS; ++ ret = spi_write_then_read(spi, buf, 1, buf + 1, 1); ++ if (ret < 0) { ++ dev_err(&spi->dev, "failed to set page %u\n", page); ++ return ret; ++ } ++ ++ buf[0] = BME680_REG_STATUS; ++ if (page) ++ buf[1] |= BME680_SPI_MEM_PAGE_BIT; ++ else ++ buf[1] &= ~BME680_SPI_MEM_PAGE_BIT; ++ ++ ret = spi_write(spi, buf, 2); ++ if (ret < 0) { ++ dev_err(&spi->dev, "failed to set page %u\n", page); ++ return ret; ++ } ++ ++ ctx->current_page = page; ++ ++ return 0; ++} ++ + static int bme680_regmap_spi_write(void *context, const void *data, + size_t count) + { +- struct spi_device *spi = context; ++ struct bme680_spi_bus_context *ctx = context; ++ struct spi_device *spi = ctx->spi; ++ int ret; + u8 buf[2]; + + memcpy(buf, data, 2); ++ ++ ret = bme680_regmap_spi_select_page(ctx, buf[0]); ++ if (ret) ++ return ret; ++ + /* + * The SPI register address (= full register address without bit 7) + * and the write command (bit7 = RW = '0') + */ + buf[0] &= ~0x80; + +- return spi_write_then_read(spi, buf, 2, NULL, 0); ++ return spi_write(spi, buf, 2); + } + + static int bme680_regmap_spi_read(void *context, const void *reg, + size_t reg_size, void *val, size_t val_size) + { +- struct spi_device *spi = context; ++ struct bme680_spi_bus_context *ctx = context; ++ struct spi_device *spi = ctx->spi; ++ int ret; ++ u8 addr = *(const u8 *)reg; ++ ++ ret = bme680_regmap_spi_select_page(ctx, addr); ++ if (ret) ++ return ret; + +- return spi_write_then_read(spi, reg, reg_size, val, val_size); ++ addr |= 0x80; /* bit7 = RW = '1' */ ++ ++ return spi_write_then_read(spi, &addr, 1, val, val_size); + } + + static struct regmap_bus bme680_regmap_bus = { +@@ -45,8 +110,8 @@ static struct regmap_bus bme680_regmap_bus = { + static int bme680_spi_probe(struct spi_device *spi) + { + const struct spi_device_id *id = spi_get_device_id(spi); ++ struct bme680_spi_bus_context *bus_context; + struct regmap *regmap; +- unsigned int val; + int ret; + + spi->bits_per_word = 8; +@@ -56,45 +121,21 @@ static int bme680_spi_probe(struct spi_device *spi) + return ret; + } + ++ bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL); ++ if (!bus_context) ++ return -ENOMEM; ++ ++ bus_context->spi = spi; ++ bus_context->current_page = 0xff; /* Undefined on warm boot */ ++ + regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus, +- &spi->dev, &bme680_regmap_config); ++ bus_context, &bme680_regmap_config); + if (IS_ERR(regmap)) { + dev_err(&spi->dev, "Failed to register spi regmap %d\n", + (int)PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + +- ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI, +- BME680_CMD_SOFTRESET); +- if (ret < 0) { +- dev_err(&spi->dev, "Failed to reset chip\n"); +- return ret; +- } +- +- /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */ +- ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val); +- if (ret < 0) { +- dev_err(&spi->dev, 
"Error reading SPI chip ID\n"); +- return ret; +- } +- +- if (val != BME680_CHIP_ID_VAL) { +- dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n", +- val, BME680_CHIP_ID_VAL); +- return -ENODEV; +- } +- /* +- * select Page 1 of spi_mem_page to enable access to +- * to registers from address 0x00 to 0x7F. +- */ +- ret = regmap_write_bits(regmap, BME680_REG_STATUS, +- BME680_SPI_MEM_PAGE_BIT, +- BME680_SPI_MEM_PAGE_1_VAL); +- if (ret < 0) { +- dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n"); +- return ret; +- } +- + return bme680_core_probe(&spi->dev, regmap, id->name); + } + +diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +index 89cb0066a6e0..8d76afb87d87 100644 +--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c ++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +@@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, + * Do not use IIO_DEGREE_TO_RAD to avoid precision + * loss. Round to the nearest integer. + */ +- *val = div_s64(val64 * 314159 + 9000000ULL, 1000); +- *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1); +- ret = IIO_VAL_FRACTIONAL; ++ *val = 0; ++ *val2 = div_s64(val64 * 3141592653ULL, ++ 180 << (CROS_EC_SENSOR_BITS - 1)); ++ ret = IIO_VAL_INT_PLUS_NANO; + break; + case MOTIONSENSE_TYPE_MAG: + /* +diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c +index 6d71fd905e29..c701a45469f6 100644 +--- a/drivers/iio/dac/mcp4725.c ++++ b/drivers/iio/dac/mcp4725.c +@@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev, + + inoutbuf[0] = 0x60; /* write EEPROM */ + inoutbuf[0] |= data->ref_mode << 3; ++ inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0; + inoutbuf[1] = data->dac_value >> 4; + inoutbuf[2] = (data->dac_value & 0xf) << 4; + +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c +index 63ca31628a93..92c07ab826eb 100644 +--- a/drivers/iio/gyro/bmg160_core.c ++++ b/drivers/iio/gyro/bmg160_core.c +@@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, + case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + return bmg160_get_filter(data, val); + case IIO_CHAN_INFO_SCALE: +- *val = 0; + switch (chan->type) { + case IIO_TEMP: +- *val2 = 500000; +- return IIO_VAL_INT_PLUS_MICRO; ++ *val = 500; ++ return IIO_VAL_INT; + case IIO_ANGL_VEL: + { + int i; +@@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, + for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) { + if (bmg160_scale_table[i].dps_range == + data->dps_range) { ++ *val = 0; + *val2 = bmg160_scale_table[i].scale; + return IIO_VAL_INT_PLUS_MICRO; + } +diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c +index 77fac81a3adc..5ddebede31a6 100644 +--- a/drivers/iio/gyro/mpu3050-core.c ++++ b/drivers/iio/gyro/mpu3050-core.c +@@ -29,7 +29,8 @@ + + #include "mpu3050.h" + +-#define MPU3050_CHIP_ID 0x69 ++#define MPU3050_CHIP_ID 0x68 ++#define MPU3050_CHIP_ID_MASK 0x7E + + /* + * Register map: anything suffixed *_H is a big-endian high byte and always +@@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev, + goto err_power_down; + } + +- if (val != MPU3050_CHIP_ID) { +- dev_err(dev, "unsupported chip id %02x\n", (u8)val); ++ if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) { ++ dev_err(dev, "unsupported chip id %02x\n", ++ (u8)(val & MPU3050_CHIP_ID_MASK)); + ret = -ENODEV; + goto err_power_down; + } +diff --git a/drivers/iio/industrialio-buffer.c 
b/drivers/iio/industrialio-buffer.c +index cd5bfe39591b..dadd921a4a30 100644 +--- a/drivers/iio/industrialio-buffer.c ++++ b/drivers/iio/industrialio-buffer.c +@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev, + const unsigned long *mask; + unsigned long *trialmask; + +- trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength), +- sizeof(*trialmask), +- GFP_KERNEL); ++ trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), ++ sizeof(*trialmask), GFP_KERNEL); + if (trialmask == NULL) + return -ENOMEM; + if (!indio_dev->masklength) { +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c +index 4f5cd9f60870..5b65750ce775 100644 +--- a/drivers/iio/industrialio-core.c ++++ b/drivers/iio/industrialio-core.c +@@ -1738,10 +1738,10 @@ EXPORT_SYMBOL(__iio_device_register); + **/ + void iio_device_unregister(struct iio_dev *indio_dev) + { +- mutex_lock(&indio_dev->info_exist_lock); +- + cdev_device_del(&indio_dev->chrdev, &indio_dev->dev); + ++ mutex_lock(&indio_dev->info_exist_lock); ++ + iio_device_unregister_debugfs(indio_dev); + + iio_disable_all_buffers(indio_dev); +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index 5f366838b7ff..e2a4570a47e8 100644 +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -992,6 +992,8 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) + * will only be one mm, so no big deal. + */ + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) ++ goto skip_mm; + mutex_lock(&ufile->umap_lock); + list_for_each_entry_safe (priv, next_priv, &ufile->umaps, + list) { +@@ -1006,6 +1008,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) + vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); + } + mutex_unlock(&ufile->umap_lock); ++ skip_mm: + up_write(&mm->mmap_sem); + mmput(mm); + } +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 628ef617bb2f..f9525d6f0bfe 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0600", 0 }, + { "ELAN0601", 0 }, + { "ELAN0602", 0 }, ++ { "ELAN0603", 0 }, ++ { "ELAN0604", 0 }, + { "ELAN0605", 0 }, ++ { "ELAN0606", 0 }, ++ { "ELAN0607", 0 }, + { "ELAN0608", 0 }, + { "ELAN0609", 0 }, + { "ELAN060B", 0 }, + { "ELAN060C", 0 }, ++ { "ELAN060F", 0 }, ++ { "ELAN0610", 0 }, + { "ELAN0611", 0 }, + { "ELAN0612", 0 }, ++ { "ELAN0615", 0 }, ++ { "ELAN0616", 0 }, + { "ELAN0617", 0 }, + { "ELAN0618", 0 }, ++ { "ELAN0619", 0 }, ++ { "ELAN061A", 0 }, ++ { "ELAN061B", 0 }, + { "ELAN061C", 0 }, + { "ELAN061D", 0 }, + { "ELAN061E", 0 }, ++ { "ELAN061F", 0 }, + { "ELAN0620", 0 }, + { "ELAN0621", 0 }, + { "ELAN0622", 0 }, ++ { "ELAN0623", 0 }, ++ { "ELAN0624", 0 }, ++ { "ELAN0625", 0 }, ++ { "ELAN0626", 0 }, ++ { "ELAN0627", 0 }, ++ { "ELAN0628", 0 }, ++ { "ELAN0629", 0 }, ++ { "ELAN062A", 0 }, ++ { "ELAN062B", 0 }, ++ { "ELAN062C", 0 }, ++ { "ELAN062D", 0 }, ++ { "ELAN0631", 0 }, ++ { "ELAN0632", 0 }, + { "ELAN1000", 0 }, + { } + }; +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 537c90c8eb0a..f89fc6ea6078 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -3214,8 +3214,12 @@ static int bond_netdev_event(struct notifier_block *this, + return NOTIFY_DONE; + + if (event_dev->flags & IFF_MASTER) { ++ int ret; ++ + netdev_dbg(event_dev, 
"IFF_MASTER\n"); +- return bond_master_netdev_event(event, event_dev); ++ ret = bond_master_netdev_event(event, event_dev); ++ if (ret != NOTIFY_DONE) ++ return ret; + } + + if (event_dev->flags & IFF_SLAVE) { +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c +index d4ee9f9c8c34..36263c77df46 100644 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c +@@ -32,6 +32,13 @@ + #define DRV_NAME "nicvf" + #define DRV_VERSION "1.0" + ++/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs ++ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed ++ * this value, keeping headroom for the 14 byte Ethernet header and two ++ * VLAN tags (for QinQ) ++ */ ++#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2) ++ + /* Supported devices */ + static const struct pci_device_id nicvf_id_table[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, +@@ -1582,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) + struct nicvf *nic = netdev_priv(netdev); + int orig_mtu = netdev->mtu; + ++ /* For now just support only the usual MTU sized frames, ++ * plus some headroom for VLAN, QinQ. ++ */ ++ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) { ++ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", ++ netdev->mtu); ++ return -EINVAL; ++ } ++ + netdev->mtu = new_mtu; + + if (!netif_running(netdev)) +@@ -1830,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) + bool bpf_attached = false; + int ret = 0; + +- /* For now just support only the usual MTU sized frames */ +- if (prog && (dev->mtu > 1500)) { ++ /* For now just support only the usual MTU sized frames, ++ * plus some headroom for VLAN, QinQ. 
++ */ ++ if (prog && dev->mtu > MAX_XDP_MTU) { + netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + dev->mtu); + return -EOPNOTSUPP; +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 697c2427f2b7..a96ad20ee484 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) + int ret; + + if (enable) { +- ret = clk_prepare_enable(fep->clk_ahb); +- if (ret) +- return ret; +- + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) +- goto failed_clk_enet_out; ++ return ret; + + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); +@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) + + phy_reset_after_clk_enable(ndev->phydev); + } else { +- clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); +@@ -1885,8 +1880,6 @@ failed_clk_ref: + failed_clk_ptp: + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); +-failed_clk_enet_out: +- clk_disable_unprepare(fep->clk_ahb); + + return ret; + } +@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev) + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; ++ ret = clk_prepare_enable(fep->clk_ahb); ++ if (ret) ++ goto failed_clk_ahb; + + fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); + if (!IS_ERR(fep->reg_phy)) { +@@ -3563,6 +3559,9 @@ failed_reset: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + failed_regulator: ++ clk_disable_unprepare(fep->clk_ahb); ++failed_clk_ahb: ++ clk_disable_unprepare(fep->clk_ipg); + failed_clk_ipg: + fec_enet_clk_enable(ndev, false); + failed_clk: +@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + ++ clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + + return 0; +@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) + { + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); ++ int ret; + +- return clk_prepare_enable(fep->clk_ipg); ++ ret = clk_prepare_enable(fep->clk_ahb); ++ if (ret) ++ return ret; ++ ret = clk_prepare_enable(fep->clk_ipg); ++ if (ret) ++ goto failed_clk_ipg; ++ ++ return 0; ++ ++failed_clk_ipg: ++ clk_disable_unprepare(fep->clk_ahb); ++ return ret; + } + + static const struct dev_pm_ops fec_pm_ops = { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +index f3c7ab6faea5..b8521e2f64ac 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, + return -EOPNOTSUPP; + } + ++ if (!(mlx5e_eswitch_rep(*out_dev) && ++ mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) ++ return -EOPNOTSUPP; ++ + return 0; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index e6099f51d25f..3b9e5f0d0212 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -1665,7 +1665,8 @@ static int 
set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) + struct mlx5e_channel *c; + int i; + +- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) ++ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || ++ priv->channels.params.xdp_prog) + return 0; + + for (i = 0; i < channels->num; i++) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 93e50ccd44c3..0cb19e4dd439 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -950,7 +950,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, + if (params->rx_dim_enabled) + __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + +- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) ++ /* We disable csum_complete when XDP is enabled since ++ * XDP programs might manipulate packets which will render ++ * skb->checksum incorrect. ++ */ ++ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) + __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); + + return 0; +@@ -4570,7 +4574,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, + { + enum mlx5e_traffic_types tt; + +- rss_params->hfunc = ETH_RSS_HASH_XOR; ++ rss_params->hfunc = ETH_RSS_HASH_TOP; + netdev_rss_key_fill(rss_params->toeplitz_hash_key, + sizeof(rss_params->toeplitz_hash_key)); + mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index f86e4804e83e..2cbda8abd8b9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -693,7 +693,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, + { + *proto = ((struct ethhdr *)skb->data)->h_proto; + *proto = __vlan_get_protocol(skb, *proto, network_depth); +- return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); ++ ++ if (*proto == htons(ETH_P_IP)) ++ return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); ++ ++ if (*proto == htons(ETH_P_IPV6)) ++ return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); ++ ++ return false; + } + + static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) +@@ -713,17 +720,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) + rq->stats->ecn_mark += !!rc; + } + +-static u32 mlx5e_get_fcs(const struct sk_buff *skb) +-{ +- const void *fcs_bytes; +- u32 _fcs_bytes; +- +- fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, +- ETH_FCS_LEN, &_fcs_bytes); +- +- return __get_unaligned_cpu32(fcs_bytes); +-} +- + static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) + { + void *ip_p = skb->data + network_depth; +@@ -734,6 +730,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) + + #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) + ++#define MAX_PADDING 8 ++ ++static void ++tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, ++ struct mlx5e_rq_stats *stats) ++{ ++ stats->csum_complete_tail_slow++; ++ skb->csum = csum_block_add(skb->csum, ++ skb_checksum(skb, offset, len, 0), ++ offset); ++} ++ ++static void ++tail_padding_csum(struct sk_buff *skb, int offset, ++ struct mlx5e_rq_stats *stats) ++{ ++ u8 tail_padding[MAX_PADDING]; ++ int len = skb->len - offset; ++ void *tail; ++ ++ if (unlikely(len > MAX_PADDING)) { ++ tail_padding_csum_slow(skb, offset, 
len, stats); ++ return; ++ } ++ ++ tail = skb_header_pointer(skb, offset, len, tail_padding); ++ if (unlikely(!tail)) { ++ tail_padding_csum_slow(skb, offset, len, stats); ++ return; ++ } ++ ++ stats->csum_complete_tail++; ++ skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); ++} ++ ++static void ++mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, ++ struct mlx5e_rq_stats *stats) ++{ ++ struct ipv6hdr *ip6; ++ struct iphdr *ip4; ++ int pkt_len; ++ ++ switch (proto) { ++ case htons(ETH_P_IP): ++ ip4 = (struct iphdr *)(skb->data + network_depth); ++ pkt_len = network_depth + ntohs(ip4->tot_len); ++ break; ++ case htons(ETH_P_IPV6): ++ ip6 = (struct ipv6hdr *)(skb->data + network_depth); ++ pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); ++ break; ++ default: ++ return; ++ } ++ ++ if (likely(pkt_len >= skb->len)) ++ return; ++ ++ tail_padding_csum(skb, pkt_len, stats); ++} ++ + static inline void mlx5e_handle_csum(struct net_device *netdev, + struct mlx5_cqe64 *cqe, + struct mlx5e_rq *rq, +@@ -753,7 +811,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, + return; + } + +- if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) ++ /* True when explicitly set via priv flag, or XDP prog is loaded */ ++ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + goto csum_unnecessary; + + /* CQE csum doesn't cover padding octets in short ethernet +@@ -781,18 +840,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, + skb->csum = csum_partial(skb->data + ETH_HLEN, + network_depth - ETH_HLEN, + skb->csum); +- if (unlikely(netdev->features & NETIF_F_RXFCS)) +- skb->csum = csum_block_add(skb->csum, +- (__force __wsum)mlx5e_get_fcs(skb), +- skb->len - ETH_FCS_LEN); ++ ++ mlx5e_skb_padding_csum(skb, network_depth, proto, stats); + stats->csum_complete++; + return; + } + + csum_unnecessary: + if (likely((cqe->hds_ip_ext & CQE_L3_OK) && +- ((cqe->hds_ip_ext & CQE_L4_OK) || +- (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { ++ (cqe->hds_ip_ext & CQE_L4_OK))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (cqe_is_tunneled(cqe)) { + skb->csum_level = 1; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +index d3fe48ff9da9..4461b44acafc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, +@@ -151,6 +153,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) + s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; + s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_complete += rq_stats->csum_complete; ++ s->rx_csum_complete_tail += rq_stats->csum_complete_tail; ++ s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; + s->rx_csum_unnecessary_inner 
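The mlx5e_skb_padding_csum() helper above only has work to do when the length implied by the IP header is shorter than skb->len; the difference is the tail padding whose checksum gets folded back in. A stand-alone sketch of that length arithmetic, with made-up frame sizes:

#include <stdio.h>

/* Number of trailing padding bytes the device checksummed, or 0 if the
 * frame carries no padding (mirrors the pkt_len vs. skb->len comparison
 * in mlx5e_skb_padding_csum()). */
static int tail_padding_len(int skb_len, int network_depth, int ip_total_len)
{
        int pkt_len = network_depth + ip_total_len;

        return pkt_len >= skb_len ? 0 : skb_len - pkt_len;
}

int main(void)
{
        /* Minimum-size 60-byte Ethernet frame carrying a tiny IPv4 packet:
         * 14-byte L2 header + 40 bytes of IP leaves 6 pad bytes. */
        printf("padding = %d bytes\n", tail_padding_len(60, 14, 40));
        /* Full-sized frame, no padding. */
        printf("padding = %d bytes\n", tail_padding_len(1514, 14, 1500));
        return 0;
}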
+= rq_stats->csum_unnecessary_inner; + s->rx_xdp_drop += rq_stats->xdp_drop; +@@ -1192,6 +1196,8 @@ static const struct counter_desc rq_stats_desc[] = { + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, ++ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) }, ++ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +index fe91ec06e3c7..714303bf0797 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +@@ -71,6 +71,8 @@ struct mlx5e_sw_stats { + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_complete; ++ u64 rx_csum_complete_tail; ++ u64 rx_csum_complete_tail_slow; + u64 rx_csum_unnecessary_inner; + u64 rx_xdp_drop; + u64 rx_xdp_redirect; +@@ -181,6 +183,8 @@ struct mlx5e_rq_stats { + u64 packets; + u64 bytes; + u64 csum_complete; ++ u64 csum_complete_tail; ++ u64 csum_complete_tail_slow; + u64 csum_unnecessary; + u64 csum_unnecessary_inner; + u64 csum_none; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +index 8de64e88c670..22a2ef111514 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, + return ret; + } + +-static void mlx5_fpga_tls_release_swid(struct idr *idr, +- spinlock_t *idr_spinlock, u32 swid) ++static void *mlx5_fpga_tls_release_swid(struct idr *idr, ++ spinlock_t *idr_spinlock, u32 swid) + { + unsigned long flags; ++ void *ptr; + + spin_lock_irqsave(idr_spinlock, flags); +- idr_remove(idr, swid); ++ ptr = idr_remove(idr, swid); + spin_unlock_irqrestore(idr_spinlock, flags); ++ return ptr; + } + + static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, +@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, + kfree(buf); + } + +-struct mlx5_teardown_stream_context { +- struct mlx5_fpga_tls_command_context cmd; +- u32 swid; +-}; +- + static void + mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *cmd, + struct mlx5_fpga_dma_buf *resp) + { +- struct mlx5_teardown_stream_context *ctx = +- container_of(cmd, struct mlx5_teardown_stream_context, cmd); +- + if (resp) { + u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); + +@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, + mlx5_fpga_err(fdev, + "Teardown stream failed with syndrome = %d", + syndrome); +- else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) +- mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, +- &fdev->tls->tx_idr_spinlock, +- ctx->swid); +- else +- mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, +- &fdev->tls->rx_idr_spinlock, +- ctx->swid); + } + mlx5_fpga_tls_put_command_ctx(cmd); + } +@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + void *cmd; + int ret; + +- rcu_read_lock(); +- 
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); +- rcu_read_unlock(); +- +- if (!flow) { +- WARN_ONCE(1, "Received NULL pointer for handle\n"); +- return -EINVAL; +- } +- + buf = kzalloc(size, GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + cmd = (buf + 1); + ++ rcu_read_lock(); ++ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); ++ if (unlikely(!flow)) { ++ rcu_read_unlock(); ++ WARN_ONCE(1, "Received NULL pointer for handle\n"); ++ kfree(buf); ++ return -EINVAL; ++ } + mlx5_fpga_tls_flow_to_cmd(flow, cmd); ++ rcu_read_unlock(); + + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); + MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); +@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, + void *flow, u32 swid, gfp_t flags) + { +- struct mlx5_teardown_stream_context *ctx; ++ struct mlx5_fpga_tls_command_context *ctx; + struct mlx5_fpga_dma_buf *buf; + void *cmd; + +@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, + if (!ctx) + return; + +- buf = &ctx->cmd.buf; ++ buf = &ctx->buf; + cmd = (ctx + 1); + MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); + MLX5_SET(tls_cmd, cmd, swid, swid); +@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + +- ctx->swid = swid; +- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, ++ mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, + mlx5_fpga_tls_teardown_completion); + } + +@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + struct mlx5_fpga_tls *tls = mdev->fpga->tls; + void *flow; + +- rcu_read_lock(); + if (direction_sx) +- flow = idr_find(&tls->tx_idr, swid); ++ flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, ++ &tls->tx_idr_spinlock, ++ swid); + else +- flow = idr_find(&tls->rx_idr, swid); +- +- rcu_read_unlock(); ++ flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, ++ &tls->rx_idr_spinlock, ++ swid); + + if (!flow) { + mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", +@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + return; + } + ++ synchronize_rcu(); /* before kfree(flow) */ + mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); + } + +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c +index ddedf8ab5b64..fc643fde5a4a 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/core.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c +@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) + return 0; + +- emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); ++ emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0); + if (!emad_wq) + return -ENOMEM; + mlxsw_core->emad_wq = emad_wq; +@@ -1912,10 +1912,10 @@ static int __init mlxsw_core_module_init(void) + { + int err; + +- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); ++ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); + if (!mlxsw_wq) + return -ENOMEM; +- mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, ++ mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, + mlxsw_core_driver_name); + if (!mlxsw_owq) { + err = -ENOMEM; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +index 
98e5ffd71b91..2f6afbfd689f 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +@@ -6745,7 +6745,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, + /* A RIF is not created for macvlan netdevs. Their MAC is used to + * populate the FDB + */ +- if (netif_is_macvlan(dev)) ++ if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) + return 0; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +index c772109b638d..f5a10e286400 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +@@ -1654,7 +1654,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid_index; + int err = 0; + +- if (switchdev_trans_ph_prepare(trans)) ++ if (switchdev_trans_ph_commit(trans)) + return 0; + + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); +diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c +index 8d54b36afee8..2bbc5b8f92c2 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/action.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/action.c +@@ -49,8 +49,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, + + tmp_push_vlan_tci = + FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) | +- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) | +- NFP_FL_PUSH_VLAN_CFI; ++ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)); + push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); + } + +diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +index 15f41cfef9f1..ab07d76b4186 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h ++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +@@ -26,7 +26,7 @@ + #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) + + #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) +-#define NFP_FLOWER_MASK_VLAN_CFI BIT(12) ++#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) + #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) + + #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) +@@ -82,7 +82,6 @@ + #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) + + #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) +-#define NFP_FL_PUSH_VLAN_CFI BIT(12) + #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) + + #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) +diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c +index cdf75595f627..571cc8ced33e 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/match.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/match.c +@@ -26,14 +26,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame, + FLOW_DISSECTOR_KEY_VLAN, + target); + /* Populate the tci field. 
*/ +- if (flow_vlan->vlan_id || flow_vlan->vlan_priority) { +- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, +- flow_vlan->vlan_priority) | +- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, +- flow_vlan->vlan_id) | +- NFP_FLOWER_MASK_VLAN_CFI; +- frame->tci = cpu_to_be16(tmp_tci); +- } ++ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; ++ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, ++ flow_vlan->vlan_priority) | ++ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, ++ flow_vlan->vlan_id); ++ frame->tci = cpu_to_be16(tmp_tci); + } + } + +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 6ce3f666d142..1283632091d5 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -1247,6 +1247,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev, + goto err_option_port_add; + } + ++ /* set promiscuity level to new slave */ ++ if (dev->flags & IFF_PROMISC) { ++ err = dev_set_promiscuity(port_dev, 1); ++ if (err) ++ goto err_set_slave_promisc; ++ } ++ ++ /* set allmulti level to new slave */ ++ if (dev->flags & IFF_ALLMULTI) { ++ err = dev_set_allmulti(port_dev, 1); ++ if (err) { ++ if (dev->flags & IFF_PROMISC) ++ dev_set_promiscuity(port_dev, -1); ++ goto err_set_slave_promisc; ++ } ++ } ++ + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); +@@ -1263,6 +1280,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev, + + return 0; + ++err_set_slave_promisc: ++ __team_option_inst_del_port(team, port); ++ + err_option_port_add: + team_upper_dev_unlink(team, port); + +@@ -1308,6 +1328,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev) + + team_port_disable(team, port); + list_del_rcu(&port->list); ++ ++ if (dev->flags & IFF_PROMISC) ++ dev_set_promiscuity(port_dev, -1); ++ if (dev->flags & IFF_ALLMULTI) ++ dev_set_allmulti(port_dev, -1); ++ + team_upper_dev_unlink(team, port); + netdev_rx_handler_unregister(port_dev); + team_port_disable_netpoll(port); +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +index 7c9dfa54fee8..9678322aca60 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +@@ -421,7 +421,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, + return; + + rcu_read_lock(); +- mt76_tx_status_lock(mdev, &list); + + if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) + wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); +@@ -434,6 +433,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, + drv_priv); + } + ++ mt76_tx_status_lock(mdev, &list); ++ + if (wcid) { + if (stat->pktid) + status.skb = mt76_tx_status_skb_get(mdev, wcid, +@@ -453,7 +454,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, + if (*update == 0 && stat_val == stat_cache && + stat->wcid == msta->status.wcid && msta->n_frames < 32) { + msta->n_frames++; +- goto out; ++ mt76_tx_status_unlock(mdev, &list); ++ rcu_read_unlock(); ++ return; + } + + mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, +@@ -469,11 +472,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, + + if (status.skb) + mt76_tx_status_skb_done(mdev, status.skb, &list); +- else +- ieee80211_tx_status_ext(mt76_hw(dev), &status); +- +-out: + mt76_tx_status_unlock(mdev, &list); ++ ++ if (!status.skb) ++ ieee80211_tx_status_ext(mt76_hw(dev), &status); + rcu_read_unlock(); + } + +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h 
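The nfp_flower_compile_meta_tci() change above always sets a VLAN-present bit and packs priority and VID into the remaining TCI fields, so a prio-0/VID-0 match is no longer encoded as all zeroes. A stand-alone sketch of that bit layout, reusing the field positions from cmsg.h (bit 12 present, bits 15:13 priority, bits 11:0 VID); the sample values are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRESENT   (1u << 12)           /* NFP_FLOWER_MASK_VLAN_PRESENT */
#define VLAN_PRIO(p)   (((p) & 0x7u) << 13) /* NFP_FLOWER_MASK_VLAN_PRIO    */
#define VLAN_VID(v)    ((v) & 0xfffu)       /* NFP_FLOWER_MASK_VLAN_VID     */

static uint16_t build_tci(unsigned int prio, unsigned int vid)
{
        return (uint16_t)(VLAN_PRESENT | VLAN_PRIO(prio) | VLAN_VID(vid));
}

int main(void)
{
        /* Priority 0, VID 0 still yields a non-zero TCI because the
         * present bit is always set - the point of the fix above. */
        printf("prio 0, vid 0   -> tci 0x%04X\n", build_tci(0, 0));
        printf("prio 5, vid 100 -> tci 0x%04X\n", build_tci(5, 100));
        return 0;
}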
+index 4b1744e9fb78..50b92ca92bd7 100644 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h +@@ -673,7 +673,6 @@ enum rt2x00_state_flags { + CONFIG_CHANNEL_HT40, + CONFIG_POWERSAVING, + CONFIG_HT_DISABLED, +- CONFIG_QOS_DISABLED, + CONFIG_MONITORING, + + /* +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +index 2825560e2424..e8462f25d252 100644 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +@@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, + rt2x00dev->intf_associated--; + + rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); +- +- clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); + } + +- /* +- * Check for access point which do not support 802.11e . We have to +- * generate data frames sequence number in S/W for such AP, because +- * of H/W bug. +- */ +- if (changes & BSS_CHANGED_QOS && !bss_conf->qos) +- set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); +- + /* + * When the erp information has changed, we should perform + * additional configuration steps. For all other changes we are done. +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +index 92ddc19e7bf7..4834b4eb0206 100644 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, + if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { + /* + * rt2800 has a H/W (or F/W) bug, device incorrectly increase +- * seqno on retransmited data (non-QOS) frames. To workaround +- * the problem let's generate seqno in software if QOS is +- * disabled. ++ * seqno on retransmitted data (non-QOS) and management frames. ++ * To workaround the problem let's generate seqno in software. ++ * Except for beacons which are transmitted periodically by H/W ++ * hence hardware has to assign seqno for them. 
+ */ +- if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) +- __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); +- else ++ if (ieee80211_is_beacon(hdr->frame_control)) { ++ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); + /* H/W will generate sequence number */ + return; ++ } ++ ++ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); + } + + /* +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c +index dfba4921b265..5bf61431434b 100644 +--- a/drivers/scsi/libfc/fc_rport.c ++++ b/drivers/scsi/libfc/fc_rport.c +@@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) + FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", + fc_rport_state(rdata)); + +- rdata->flags &= ~FC_RP_STARTED; + fc_rport_enter_delete(rdata, RPORT_EV_STOP); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, fc_rport_destroy); +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 655ad26106e4..5c78710b713f 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -1763,8 +1763,12 @@ out_put_budget: + ret = BLK_STS_DEV_RESOURCE; + break; + default: ++ if (unlikely(!scsi_device_online(sdev))) ++ scsi_req(req)->result = DID_NO_CONNECT << 16; ++ else ++ scsi_req(req)->result = DID_ERROR << 16; + /* +- * Make sure to release all allocated ressources when ++ * Make sure to release all allocated resources when + * we hit an error, as we will never see this command + * again. + */ +diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c +index 808ed92ed66f..1bb1cb651349 100644 +--- a/drivers/staging/comedi/drivers/ni_usb6501.c ++++ b/drivers/staging/comedi/drivers/ni_usb6501.c +@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev) + + size = usb_endpoint_maxp(devpriv->ep_tx); + devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); +- if (!devpriv->usb_tx_buf) { +- kfree(devpriv->usb_rx_buf); ++ if (!devpriv->usb_tx_buf) + return -ENOMEM; +- } + + return 0; + } +@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev, + if (!devpriv) + return -ENOMEM; + ++ mutex_init(&devpriv->mut); ++ usb_set_intfdata(intf, devpriv); ++ + ret = ni6501_find_endpoints(dev); + if (ret) + return ret; +@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev, + if (ret) + return ret; + +- mutex_init(&devpriv->mut); +- usb_set_intfdata(intf, devpriv); +- + ret = comedi_alloc_subdevices(dev, 2); + if (ret) + return ret; +diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c +index 6234b649d887..65dc6c51037e 100644 +--- a/drivers/staging/comedi/drivers/vmk80xx.c ++++ b/drivers/staging/comedi/drivers/vmk80xx.c +@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) + + size = usb_endpoint_maxp(devpriv->ep_tx); + devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); +- if (!devpriv->usb_tx_buf) { +- kfree(devpriv->usb_rx_buf); ++ if (!devpriv->usb_tx_buf) + return -ENOMEM; +- } + + return 0; + } +@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, + + devpriv->model = board->model; + ++ sema_init(&devpriv->limit_sem, 8); ++ + ret = vmk80xx_find_usb_endpoints(dev); + if (ret) + return ret; +@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, + if (ret) + return ret; + +- sema_init(&devpriv->limit_sem, 8); +- + usb_set_intfdata(intf, devpriv); + + if (devpriv->model == VMK8055_MODEL) +diff --git 
a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c +index acdbc07fd259..2fc8bc22b57b 100644 +--- a/drivers/staging/iio/adc/ad7192.c ++++ b/drivers/staging/iio/adc/ad7192.c +@@ -109,10 +109,10 @@ + #define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */ + #define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */ + +-#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */ +-#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */ +-#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */ +-#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */ ++#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */ ++#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */ ++#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */ ++#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */ + #define AD7193_CH_TEMP 0x100 /* Temp senseor */ + #define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */ + #define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */ +diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c +index 029c3bf42d4d..07774c000c5a 100644 +--- a/drivers/staging/iio/meter/ade7854.c ++++ b/drivers/staging/iio/meter/ade7854.c +@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644, + static IIO_DEV_ATTR_IPEAK(0644, + ade7854_read_32bit, + ade7854_write_32bit, +- ADE7854_VPEAK); ++ ADE7854_IPEAK); + static IIO_DEV_ATTR_APHCAL(0644, + ade7854_read_16bit, + ade7854_write_16bit, +diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c +index 18936cdb1083..956daf8c3bd2 100644 +--- a/drivers/staging/most/core.c ++++ b/drivers/staging/most/core.c +@@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface) + + INIT_LIST_HEAD(&iface->p->channel_list); + iface->p->dev_id = id; +- snprintf(iface->p->name, STRING_SIZE, "mdev%d", id); ++ strcpy(iface->p->name, iface->description); + iface->dev.init_name = iface->p->name; + iface->dev.bus = &mc.bus; + iface->dev.parent = &mc.dev; +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index 93bd90f1ff14..e9a8b79ba77e 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@ -2497,14 +2497,16 @@ done: + * center of the last stop bit in sampling clocks. + */ + int last_stop = bits * 2 - 1; +- int deviation = min_err * srr * last_stop / 2 / baud; ++ int deviation = DIV_ROUND_CLOSEST(min_err * last_stop * ++ (int)(srr + 1), ++ 2 * (int)baud); + + if (abs(deviation) >= 2) { + /* At least two sampling clocks off at the + * last stop bit; we can increase the error + * margin by shifting the sampling point. 
+ */ +- int shift = min(-8, max(7, deviation / 2)); ++ int shift = clamp(deviation / 2, -8, 7); + + hssrr |= (shift << HSCIF_SRHP_SHIFT) & + HSCIF_SRHP_MASK; +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 9646ff63e77a..b6621a2e916d 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -1518,7 +1518,8 @@ static void csi_J(struct vc_data *vc, int vpar) + return; + } + scr_memsetw(start, vc->vc_video_erase_char, 2 * count); +- update_region(vc, (unsigned long) start, count); ++ if (con_should_update(vc)) ++ do_update_region(vc, (unsigned long) start, count); + vc->vc_need_wrap = 0; + } + +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index a2e5dc7716e2..674cfc5a4084 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem, + u64 start, u64 size, u64 end, + u64 userspace_addr, int perm) + { +- struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC); ++ struct vhost_umem_node *tmp, *node; + ++ if (!size) ++ return -EFAULT; ++ ++ node = kmalloc(sizeof(*node), GFP_ATOMIC); + if (!node) + return -ENOMEM; + +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index 6c934ab3722b..10ead04346ee 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -1303,6 +1303,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file) + } + + struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file); ++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr); + void cifsFileInfo_put(struct cifsFileInfo *cifs_file); + + #define CIFS_CACHE_READ_FLG 1 +@@ -1824,6 +1825,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock; + #endif /* CONFIG_CIFS_ACL */ + + void cifs_oplock_break(struct work_struct *work); ++void cifs_queue_oplock_break(struct cifsFileInfo *cfile); + + extern const struct slow_work_ops cifs_oplock_break_ops; + extern struct workqueue_struct *cifsiod_wq; +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 8d107587208f..7c05353b766c 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -360,12 +360,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file) + return cifs_file; + } + +-/* +- * Release a reference on the file private data. This may involve closing +- * the filehandle out on the server. Must be called without holding +- * tcon->open_file_lock and cifs_file->file_info_lock. ++/** ++ * cifsFileInfo_put - release a reference of file priv data ++ * ++ * Always potentially wait for oplock handler. See _cifsFileInfo_put(). + */ + void cifsFileInfo_put(struct cifsFileInfo *cifs_file) ++{ ++ _cifsFileInfo_put(cifs_file, true); ++} ++ ++/** ++ * _cifsFileInfo_put - release a reference of file priv data ++ * ++ * This may involve closing the filehandle @cifs_file out on the ++ * server. Must be called without holding tcon->open_file_lock and ++ * cifs_file->file_info_lock. ++ * ++ * If @wait_for_oplock_handler is true and we are releasing the last ++ * reference, wait for any running oplock break handler of the file ++ * and cancel any pending one. If calling this function from the ++ * oplock break handler, you need to pass false. 
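The sh-sci change above replaces min(-8, max(7, deviation / 2)) with clamp(deviation / 2, -8, 7). The old expression can only ever evaluate to -8, since max(7, x) is at least 7 and the outer min then picks -8; the fix restores the intended [-8, 7] clamp. A stand-alone demonstration with arbitrary sample deviations:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define CLAMP(x, lo, hi) MIN(MAX(x, lo), hi)

int main(void)
{
        int samples[] = { -20, -3, 0, 4, 20 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                int d = samples[i];

                printf("d = %3d: old min(-8, max(7, d)) = %3d, "
                       "new clamp(d, -8, 7) = %3d\n",
                       d, MIN(-8, MAX(7, d)), CLAMP(d, -8, 7));
        }
        return 0;
}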
++ * ++ */ ++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler) + { + struct inode *inode = d_inode(cifs_file->dentry); + struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); +@@ -414,7 +432,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) + + spin_unlock(&tcon->open_file_lock); + +- oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); ++ oplock_break_cancelled = wait_oplock_handler ? ++ cancel_work_sync(&cifs_file->oplock_break) : false; + + if (!tcon->need_reconnect && !cifs_file->invalidHandle) { + struct TCP_Server_Info *server = tcon->ses->server; +@@ -4480,6 +4499,7 @@ void cifs_oplock_break(struct work_struct *work) + cinode); + cifs_dbg(FYI, "Oplock release rc = %d\n", rc); + } ++ _cifsFileInfo_put(cfile, false /* do not wait for ourself */); + cifs_done_oplock_break(cinode); + } + +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c +index bee203055b30..1e1626a2cfc3 100644 +--- a/fs/cifs/misc.c ++++ b/fs/cifs/misc.c +@@ -501,8 +501,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, + &pCifsInode->flags); + +- queue_work(cifsoplockd_wq, +- &netfile->oplock_break); ++ cifs_queue_oplock_break(netfile); + netfile->oplock_break_cancelled = false; + + spin_unlock(&tcon->open_file_lock); +@@ -607,6 +606,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode) + spin_unlock(&cinode->writers_lock); + } + ++/** ++ * cifs_queue_oplock_break - queue the oplock break handler for cfile ++ * ++ * This function is called from the demultiplex thread when it ++ * receives an oplock break for @cfile. ++ * ++ * Assumes the tcon->open_file_lock is held. ++ * Assumes cfile->file_info_lock is NOT held. ++ */ ++void cifs_queue_oplock_break(struct cifsFileInfo *cfile) ++{ ++ /* ++ * Bump the handle refcount now while we hold the ++ * open_file_lock to enforce the validity of it for the oplock ++ * break handler. The matching put is done at the end of the ++ * handler. 
++ */ ++ cifsFileInfo_get(cfile); ++ ++ queue_work(cifsoplockd_wq, &cfile->oplock_break); ++} ++ + void cifs_done_oplock_break(struct cifsInodeInfo *cinode) + { + clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c +index 58700d2ba8cd..0a7ed2e3ad4f 100644 +--- a/fs/cifs/smb2misc.c ++++ b/fs/cifs/smb2misc.c +@@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, + clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, + &cinode->flags); + +- queue_work(cifsoplockd_wq, &cfile->oplock_break); ++ cifs_queue_oplock_break(cfile); + kfree(lw); + return true; + } +@@ -719,8 +719,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) + CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, + &cinode->flags); + spin_unlock(&cfile->file_info_lock); +- queue_work(cifsoplockd_wq, +- &cfile->oplock_break); ++ ++ cifs_queue_oplock_break(cfile); + + spin_unlock(&tcon->open_file_lock); + spin_unlock(&cifs_tcp_ses_lock); +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index ea56b1cdbdde..d5434ac0571b 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -2210,6 +2210,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, + + rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov, + &resp_buftype); ++ if (!rc) ++ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); + if (!rc || !err_iov.iov_base) { + rc = -ENOENT; + goto free_path; +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 068febe37fe4..938e75cc3b66 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -815,8 +815,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) + } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { + /* ops set to 3.0 by default for default so update */ + ses->server->ops = &smb21_operations; +- } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) ++ ses->server->vals = &smb21_values; ++ } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) { + ses->server->ops = &smb311_operations; ++ ses->server->vals = &smb311_values; ++ } + } else if (le16_to_cpu(rsp->DialectRevision) != + ses->server->vals->protocol_id) { + /* if requested single dialect ensure returned dialect matched */ +@@ -3387,8 +3390,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, + rqst.rq_nvec = 1; + + rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); +- cifs_small_buf_release(req); +- + rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; + + if (rc) { +@@ -3407,6 +3408,8 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, + io_parms->tcon->tid, ses->Suid, + io_parms->offset, io_parms->length); + ++ cifs_small_buf_release(req); ++ + *nbytes = le32_to_cpu(rsp->DataLength); + if ((*nbytes > CIFS_MAX_MSGSIZE) || + (*nbytes > io_parms->length)) { +@@ -3705,7 +3708,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, + + rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst, + &resp_buftype, flags, &rsp_iov); +- cifs_small_buf_release(req); + rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; + + if (rc) { +@@ -3723,6 +3725,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, + io_parms->offset, *nbytes); + } + ++ cifs_small_buf_release(req); + free_rsp_buf(resp_buftype, rsp); + return rc; + } +diff --git a/fs/dax.c b/fs/dax.c +index 05cca2214ae3..827ee143413e 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + #include 
"internal.h" + + #define CREATE_TRACE_POINTS +@@ -1409,7 +1410,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, + { + struct address_space *mapping = vmf->vma->vm_file->f_mapping; + unsigned long pmd_addr = vmf->address & PMD_MASK; ++ struct vm_area_struct *vma = vmf->vma; + struct inode *inode = mapping->host; ++ pgtable_t pgtable = NULL; + struct page *zero_page; + spinlock_t *ptl; + pmd_t pmd_entry; +@@ -1424,12 +1427,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, + *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, + DAX_PMD | DAX_ZERO_PAGE, false); + ++ if (arch_needs_pgtable_deposit()) { ++ pgtable = pte_alloc_one(vma->vm_mm); ++ if (!pgtable) ++ return VM_FAULT_OOM; ++ } ++ + ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); + if (!pmd_none(*(vmf->pmd))) { + spin_unlock(ptl); + goto fallback; + } + ++ if (pgtable) { ++ pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); ++ mm_inc_nr_ptes(vma->vm_mm); ++ } + pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); + pmd_entry = pmd_mkhuge(pmd_entry); + set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); +@@ -1438,6 +1451,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, + return VM_FAULT_NOPAGE; + + fallback: ++ if (pgtable) ++ pte_free(vma->vm_mm, pgtable); + trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); + return VM_FAULT_FALLBACK; + } +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 85b0ef890b28..91bd2ff0c62c 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -1141,6 +1141,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, + count = -EINTR; + goto out_mm; + } ++ /* ++ * Avoid to modify vma->vm_flags ++ * without locked ops while the ++ * coredump reads the vm_flags. ++ */ ++ if (!mmget_still_valid(mm)) { ++ /* ++ * Silently return "count" ++ * like if get_task_mm() ++ * failed. FIXME: should this ++ * function have returned ++ * -ESRCH if get_task_mm() ++ * failed like if ++ * get_proc_task() fails? ++ */ ++ up_write(&mm->mmap_sem); ++ goto out_mm; ++ } + for (vma = mm->mmap; vma; vma = vma->vm_next) { + vma->vm_flags &= ~VM_SOFTDIRTY; + vma_set_page_prot(vma); +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c +index 89800fc7dc9d..f5de1e726356 100644 +--- a/fs/userfaultfd.c ++++ b/fs/userfaultfd.c +@@ -629,6 +629,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, + + /* the various vma->vm_userfaultfd_ctx still points to it */ + down_write(&mm->mmap_sem); ++ /* no task can run (and in turn coredump) yet */ ++ VM_WARN_ON(!mmget_still_valid(mm)); + for (vma = mm->mmap; vma; vma = vma->vm_next) + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; +@@ -883,6 +885,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file) + * taking the mmap_sem for writing. 
+ */ + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) ++ goto skip_mm; + prev = NULL; + for (vma = mm->mmap; vma; vma = vma->vm_next) { + cond_resched(); +@@ -905,6 +909,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) + vma->vm_flags = new_flags; + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + } ++skip_mm: + up_write(&mm->mmap_sem); + mmput(mm); + wakeup: +@@ -1333,6 +1338,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, + goto out; + + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) ++ goto out_unlock; + vma = find_vma_prev(mm, start, &prev); + if (!vma) + goto out_unlock; +@@ -1520,6 +1527,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, + goto out; + + down_write(&mm->mmap_sem); ++ if (!mmget_still_valid(mm)) ++ goto out_unlock; + vma = find_vma_prev(mm, start, &prev); + if (!vma) + goto out_unlock; +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index e07e91daaacc..72ff78c33033 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -173,6 +173,7 @@ struct kretprobe_instance { + struct kretprobe *rp; + kprobe_opcode_t *ret_addr; + struct task_struct *task; ++ void *fp; + char data[0]; + }; + +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 848b54b7ec91..7df56decae37 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1484,6 +1484,7 @@ struct net_device_ops { + * @IFF_FAILOVER: device is a failover master device + * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device + * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device ++ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running + */ + enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1<<0, +@@ -1516,6 +1517,7 @@ enum netdev_priv_flags { + IFF_FAILOVER = 1<<27, + IFF_FAILOVER_SLAVE = 1<<28, + IFF_L3MDEV_RX_HANDLER = 1<<29, ++ IFF_LIVE_RENAME_OK = 1<<30, + }; + + #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN +@@ -1547,6 +1549,7 @@ enum netdev_priv_flags { + #define IFF_FAILOVER IFF_FAILOVER + #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE + #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER ++#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK + + /** + * struct net_device - The DEVICE structure. +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h +index 3bfa6a0cbba4..c1dbb737a36c 100644 +--- a/include/linux/sched/mm.h ++++ b/include/linux/sched/mm.h +@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm) + __mmdrop(mm); + } + ++/* ++ * This has to be called after a get_task_mm()/mmget_not_zero() ++ * followed by taking the mmap_sem for writing before modifying the ++ * vmas or anything the coredump pretends not to change from under it. ++ * ++ * NOTE: find_extend_vma() called from GUP context is the only place ++ * that can modify the "mm" (notably the vm_start/end) under mmap_sem ++ * for reading and outside the context of the process, so it is also ++ * the only case that holds the mmap_sem for reading that must call ++ * this function. Generally if the mmap_sem is hold for reading ++ * there's no need of this check after get_task_mm()/mmget_not_zero(). ++ * ++ * This function can be obsoleted and the check can be removed, after ++ * the coredump code will hold the mmap_sem for writing before ++ * invoking the ->core_dump methods. 
++ */ ++static inline bool mmget_still_valid(struct mm_struct *mm) ++{ ++ return likely(!mm->core_state); ++} ++ + /** + * mmget() - Pin the address space associated with a &struct mm_struct. + * @mm: The address space to pin. +diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h +index 87499b6b35d6..df5c69db68af 100644 +--- a/include/net/nfc/nci_core.h ++++ b/include/net/nfc/nci_core.h +@@ -166,7 +166,7 @@ struct nci_conn_info { + * According to specification 102 622 chapter 4.4 Pipes, + * the pipe identifier is 7 bits long. + */ +-#define NCI_HCI_MAX_PIPES 127 ++#define NCI_HCI_MAX_PIPES 128 + + struct nci_hci_gate { + u8 gate; +diff --git a/include/net/tls.h b/include/net/tls.h +index 1486b60c4de8..8b3d10917d99 100644 +--- a/include/net/tls.h ++++ b/include/net/tls.h +@@ -289,6 +289,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); + int tls_device_sendpage(struct sock *sk, struct page *page, + int offset, size_t size, int flags); + void tls_device_sk_destruct(struct sock *sk); ++void tls_device_free_resources_tx(struct sock *sk); + void tls_device_init(void); + void tls_device_cleanup(void); + int tls_tx_records(struct sock *sk, int flags); +@@ -312,6 +313,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx, + int flags); + int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, + int flags); ++bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx); + + int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx, + int flags, long *timeo); +@@ -364,7 +366,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, + static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) + { + #ifdef CONFIG_SOCK_VALIDATE_XMIT +- return sk_fullsock(sk) & ++ return sk_fullsock(sk) && + (smp_load_acquire(&sk->sk_validate_xmit_skb) == + &tls_validate_xmit_skb); + #else +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c +index 878c62ec0190..dbd7656b4f73 100644 +--- a/kernel/events/ring_buffer.c ++++ b/kernel/events/ring_buffer.c +@@ -456,24 +456,21 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) + rb->aux_head += size; + } + +- if (size || handle->aux_flags) { +- /* +- * Only send RECORD_AUX if we have something useful to communicate +- * +- * Note: the OVERWRITE records by themselves are not considered +- * useful, as they don't communicate any *new* information, +- * aside from the short-lived offset, that becomes history at +- * the next event sched-in and therefore isn't useful. +- * The userspace that needs to copy out AUX data in overwrite +- * mode should know to use user_page::aux_head for the actual +- * offset. So, from now on we don't output AUX records that +- * have *only* OVERWRITE flag set. +- */ +- +- if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE) +- perf_event_aux_event(handle->event, aux_head, size, +- handle->aux_flags); +- } ++ /* ++ * Only send RECORD_AUX if we have something useful to communicate ++ * ++ * Note: the OVERWRITE records by themselves are not considered ++ * useful, as they don't communicate any *new* information, ++ * aside from the short-lived offset, that becomes history at ++ * the next event sched-in and therefore isn't useful. ++ * The userspace that needs to copy out AUX data in overwrite ++ * mode should know to use user_page::aux_head for the actual ++ * offset. So, from now on we don't output AUX records that ++ * have *only* OVERWRITE flag set. 
++ */ ++ if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)) ++ perf_event_aux_event(handle->event, aux_head, size, ++ handle->aux_flags); + + rb->user_page->aux_head = rb->aux_head; + if (rb_need_aux_wakeup(rb)) +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index f4ddfdd2d07e..de78d1b998f8 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -709,7 +709,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force) + static int reuse_unused_kprobe(struct kprobe *ap) + { + struct optimized_kprobe *op; +- int ret; + + /* + * Unused kprobe MUST be on the way of delayed unoptimizing (means +@@ -720,9 +719,8 @@ static int reuse_unused_kprobe(struct kprobe *ap) + /* Enable the probe again */ + ap->flags &= ~KPROBE_FLAG_DISABLED; + /* Optimize it again (remove from op->list) */ +- ret = kprobe_optready(ap); +- if (ret) +- return ret; ++ if (!kprobe_optready(ap)) ++ return -EINVAL; + + optimize_kprobe(ap); + return 0; +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 5e61a1a99e38..eeb605656d59 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4859,12 +4859,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) + return HRTIMER_NORESTART; + } + ++extern const u64 max_cfs_quota_period; ++ + static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) + { + struct cfs_bandwidth *cfs_b = + container_of(timer, struct cfs_bandwidth, period_timer); + int overrun; + int idle = 0; ++ int count = 0; + + raw_spin_lock(&cfs_b->lock); + for (;;) { +@@ -4872,6 +4875,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) + if (!overrun) + break; + ++ if (++count > 3) { ++ u64 new, old = ktime_to_ns(cfs_b->period); ++ ++ new = (old * 147) / 128; /* ~115% */ ++ new = min(new, max_cfs_quota_period); ++ ++ cfs_b->period = ns_to_ktime(new); ++ ++ /* since max is 1s, this is limited to 1e9^2, which fits in u64 */ ++ cfs_b->quota *= new; ++ cfs_b->quota = div64_u64(cfs_b->quota, old); ++ ++ pr_warn_ratelimited( ++ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n", ++ smp_processor_id(), ++ div_u64(new, NSEC_PER_USEC), ++ div_u64(cfs_b->quota, NSEC_PER_USEC)); ++ ++ /* reset count so we don't come right back in here */ ++ count = 0; ++ } ++ + idle = do_sched_cfs_period_timer(cfs_b, overrun); + } + if (idle) +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 28ec71d914c7..f50f1471c119 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -126,6 +126,7 @@ static int zero; + static int __maybe_unused one = 1; + static int __maybe_unused two = 2; + static int __maybe_unused four = 4; ++static unsigned long zero_ul; + static unsigned long one_ul = 1; + static unsigned long long_max = LONG_MAX; + static int one_hundred = 100; +@@ -1723,7 +1724,7 @@ static struct ctl_table fs_table[] = { + .maxlen = sizeof(files_stat.max_files), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax, +- .extra1 = &zero, ++ .extra1 = &zero_ul, + .extra2 = &long_max, + }, + { +diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c +index 094b82ca95e5..930113b9799a 100644 +--- a/kernel/time/sched_clock.c ++++ b/kernel/time/sched_clock.c +@@ -272,7 +272,7 @@ static u64 notrace suspended_sched_clock_read(void) + return cd.read_data[seq & 1].epoch_cyc; + } + +-static int sched_clock_suspend(void) ++int sched_clock_suspend(void) + { + struct clock_read_data *rd = &cd.read_data[0]; + +@@ -283,7 +283,7 @@ static int sched_clock_suspend(void) + return 0; + } + 
+-static void sched_clock_resume(void) ++void sched_clock_resume(void) + { + struct clock_read_data *rd = &cd.read_data[0]; + +diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c +index 529143b4c8d2..df401463a191 100644 +--- a/kernel/time/tick-common.c ++++ b/kernel/time/tick-common.c +@@ -487,6 +487,7 @@ void tick_freeze(void) + trace_suspend_resume(TPS("timekeeping_freeze"), + smp_processor_id(), true); + system_state = SYSTEM_SUSPEND; ++ sched_clock_suspend(); + timekeeping_suspend(); + } else { + tick_suspend_local(); +@@ -510,6 +511,7 @@ void tick_unfreeze(void) + + if (tick_freeze_depth == num_online_cpus()) { + timekeeping_resume(); ++ sched_clock_resume(); + system_state = SYSTEM_RUNNING; + trace_suspend_resume(TPS("timekeeping_freeze"), + smp_processor_id(), false); +diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h +index 7a9b4eb7a1d5..141ab3ab0354 100644 +--- a/kernel/time/timekeeping.h ++++ b/kernel/time/timekeeping.h +@@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void); + extern void timekeeping_warp_clock(void); + extern int timekeeping_suspend(void); + extern void timekeeping_resume(void); ++#ifdef CONFIG_GENERIC_SCHED_CLOCK ++extern int sched_clock_suspend(void); ++extern void sched_clock_resume(void); ++#else ++static inline int sched_clock_suspend(void) { return 0; } ++static inline void sched_clock_resume(void) { } ++#endif + + extern void do_timer(unsigned long ticks); + extern void update_wall_time(void); +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index aac7847c0214..f546ae5102e0 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -33,6 +33,7 @@ + #include + #include + #include ++#include + + #include + +@@ -6216,7 +6217,7 @@ void ftrace_reset_array_ops(struct trace_array *tr) + tr->ops->func = ftrace_stub; + } + +-static inline void ++static nokprobe_inline void + __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ignored, struct pt_regs *regs) + { +@@ -6276,11 +6277,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, + { + __ftrace_ops_list_func(ip, parent_ip, NULL, regs); + } ++NOKPROBE_SYMBOL(ftrace_ops_list_func); + #else + static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) + { + __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); + } ++NOKPROBE_SYMBOL(ftrace_ops_no_ops); + #endif + + /* +@@ -6307,6 +6310,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, + preempt_enable_notrace(); + trace_clear_recursion(bit); + } ++NOKPROBE_SYMBOL(ftrace_ops_assist_func); + + /** + * ftrace_ops_get_func - get the function a trampoline should call +diff --git a/mm/mmap.c b/mm/mmap.c +index fc1809b1bed6..da9236a5022e 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -45,6 +45,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -2526,7 +2527,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) + vma = find_vma_prev(mm, addr, &prev); + if (vma && (vma->vm_start <= addr)) + return vma; +- if (!prev || expand_stack(prev, addr)) ++ /* don't alter vm_end if the coredump is running */ ++ if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr)) + return NULL; + if (prev->vm_flags & VM_LOCKED) + populate_vma_page_range(prev, addr, prev->vm_end, NULL); +@@ -2552,6 +2554,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) + return vma; + if (!(vma->vm_flags & VM_GROWSDOWN)) + return NULL; ++ /* don't alter vm_start if the coredump is running */ ++ if 
(!mmget_still_valid(mm)) ++ return NULL; + start = vma->vm_start; + if (expand_stack(vma, addr)) + return NULL; +diff --git a/mm/percpu.c b/mm/percpu.c +index db86282fd024..59bd6a51954c 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -2531,8 +2531,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, + ai->groups[group].base_offset = areas[group] - base; + } + +- pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", +- PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, ++ pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", ++ PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, + ai->dyn_size, ai->unit_size); + + rc = pcpu_setup_first_chunk(ai, base); +@@ -2653,8 +2653,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size, + } + + /* we're ready, commit */ +- pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n", +- unit_pages, psize_str, vm.addr, ai->static_size, ++ pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", ++ unit_pages, psize_str, ai->static_size, + ai->reserved_size, ai->dyn_size); + + rc = pcpu_setup_first_chunk(ai, vm.addr); +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 83b30edc2f7f..f807f2e3b4cb 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -1274,13 +1274,8 @@ const char * const vmstat_text[] = { + #endif + #endif /* CONFIG_MEMORY_BALLOON */ + #ifdef CONFIG_DEBUG_TLBFLUSH +-#ifdef CONFIG_SMP + "nr_tlb_remote_flush", + "nr_tlb_remote_flush_received", +-#else +- "", /* nr_tlb_remote_flush */ +- "", /* nr_tlb_remote_flush_received */ +-#endif /* CONFIG_SMP */ + "nr_tlb_local_flush_all", + "nr_tlb_local_flush_one", + #endif /* CONFIG_DEBUG_TLBFLUSH */ +diff --git a/net/atm/lec.c b/net/atm/lec.c +index d7f5cf5b7594..ad4f829193f0 100644 +--- a/net/atm/lec.c ++++ b/net/atm/lec.c +@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) + + static int lec_mcast_attach(struct atm_vcc *vcc, int arg) + { +- if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) ++ if (arg < 0 || arg >= MAX_LEC_ITF) ++ return -EINVAL; ++ arg = array_index_nospec(arg, MAX_LEC_ITF); ++ if (!dev_lec[arg]) + return -EINVAL; + vcc->proto_data = dev_lec[arg]; + return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc); +@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg) + i = arg; + if (arg >= MAX_LEC_ITF) + return -EINVAL; ++ i = array_index_nospec(arg, MAX_LEC_ITF); + if (!dev_lec[i]) { + int size; + +diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c +index 5ea7e56119c1..ba303ee99b9b 100644 +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb) + /* note: already called with rcu_read_lock */ + static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb) + { +- struct net_bridge_port *p = br_port_get_rcu(skb->dev); +- + __br_handle_local_finish(skb); + +- BR_INPUT_SKB_CB(skb)->brdev = p->br->dev; +- br_pass_frame_up(skb); +- return 0; ++ /* return 1 to signal the okfn() was called so it's ok to use the skb */ ++ return 1; + } + + /* +@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) + goto forward; + } + +- /* Deliver packet to local host only */ +- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev), +- NULL, skb, skb->dev, NULL, br_handle_local_finish); +- return RX_HANDLER_CONSUMED; ++ /* The else clause should be hit when nf_hook(): ++ * - returns < 0 (drop/error) ++ * - returns = 0 (stolen/nf_queue) ++ * Thus return 1 from the okfn() to signal the skb is ok to 
pass ++ */ ++ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, ++ dev_net(skb->dev), NULL, skb, skb->dev, NULL, ++ br_handle_local_finish) == 1) { ++ return RX_HANDLER_PASS; ++ } else { ++ return RX_HANDLER_CONSUMED; ++ } + } + + forward: +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index e4777614a8a0..61ff0d497da6 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -1916,7 +1916,8 @@ static void br_multicast_start_querier(struct net_bridge *br, + + __br_multicast_open(br, query); + +- list_for_each_entry(port, &br->port_list, list) { ++ rcu_read_lock(); ++ list_for_each_entry_rcu(port, &br->port_list, list) { + if (port->state == BR_STATE_DISABLED || + port->state == BR_STATE_BLOCKING) + continue; +@@ -1928,6 +1929,7 @@ static void br_multicast_start_querier(struct net_bridge *br, + br_multicast_enable(&port->ip6_own_query); + #endif + } ++ rcu_read_unlock(); + } + + int br_multicast_toggle(struct net_bridge *br, unsigned long val) +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c +index 9c07591b0232..7104cf13da84 100644 +--- a/net/bridge/br_netlink.c ++++ b/net/bridge/br_netlink.c +@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) + nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, + br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) || + nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT, +- br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT))) ++ br_opt_get(br, BROPT_VLAN_STATS_PER_PORT))) + return -EMSGSIZE; + #endif + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING +diff --git a/net/core/dev.c b/net/core/dev.c +index 12824e007e06..7277dd393c00 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname) + BUG_ON(!dev_net(dev)); + + net = dev_net(dev); +- if (dev->flags & IFF_UP) ++ ++ /* Some auto-enslaved devices e.g. failover slaves are ++ * special, as userspace might rename the device after ++ * the interface had been brought up and running since ++ * the point kernel initiated auto-enslavement. Allow ++ * live name change even when these slave devices are ++ * up and running. ++ * ++ * Typically, users of these auto-enslaving devices ++ * don't actually care about slave name change, as ++ * they are supposed to operate on master interface ++ * directly. 
++ */ ++ if (dev->flags & IFF_UP && ++ likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) + return -EBUSY; + + write_seqcount_begin(&devnet_rename_seq); +diff --git a/net/core/failover.c b/net/core/failover.c +index 4a92a98ccce9..b5cd3c727285 100644 +--- a/net/core/failover.c ++++ b/net/core/failover.c +@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev) + goto err_upper_link; + } + +- slave_dev->priv_flags |= IFF_FAILOVER_SLAVE; ++ slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); + + if (fops && fops->slave_register && + !fops->slave_register(slave_dev, failover_dev)) + return NOTIFY_OK; + + netdev_upper_dev_unlink(slave_dev, failover_dev); +- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; ++ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); + err_upper_link: + netdev_rx_handler_unregister(slave_dev); + done: +@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev) + + netdev_rx_handler_unregister(slave_dev); + netdev_upper_dev_unlink(slave_dev, failover_dev); +- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE; ++ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK); + + if (fops && fops->slave_unregister && + !fops->slave_unregister(slave_dev, failover_dev)) +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index ef2cd5712098..40796b8bf820 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); + + static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) + { +- int mac_len; ++ int mac_len, meta_len; ++ void *meta; + + if (skb_cow(skb, skb_headroom(skb)) < 0) { + kfree_skb(skb); +@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) + memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), + mac_len - VLAN_HLEN - ETH_TLEN); + } ++ ++ meta_len = skb_metadata_len(skb); ++ if (meta_len) { ++ meta = skb_metadata_end(skb) - meta_len; ++ memmove(meta + VLAN_HLEN, meta, meta_len); ++ } ++ + skb->mac_header += VLAN_HLEN; + return skb; + } +diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c +index 79e98e21cdd7..12ce6c526d72 100644 +--- a/net/ipv4/fou.c ++++ b/net/ipv4/fou.c +@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) + struct guehdr *guehdr; + void *data; + u16 doffset = 0; ++ u8 proto_ctype; + + if (!fou) + return 1; +@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) + if (unlikely(guehdr->control)) + return gue_control_message(skb, guehdr); + ++ proto_ctype = guehdr->proto_ctype; + __skb_pull(skb, sizeof(struct udphdr) + hdrlen); + skb_reset_transport_header(skb); + + if (iptunnel_pull_offloads(skb)) + goto drop; + +- return -guehdr->proto_ctype; ++ return -proto_ctype; + + drop: + kfree_skb(skb); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index e04cdb58a602..25d9bef27d03 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1185,9 +1185,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) + + static void ipv4_link_failure(struct sk_buff *skb) + { ++ struct ip_options opt; + struct rtable *rt; ++ int res; + +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); ++ /* Recompile ip options since IPCB may not be valid anymore. 
++ */ ++ memset(&opt, 0, sizeof(opt)); ++ opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr); ++ ++ rcu_read_lock(); ++ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL); ++ rcu_read_unlock(); ++ ++ if (res) ++ return; ++ ++ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt); + + rt = skb_rtable(skb); + if (rt) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 7b1ef897b398..95b2e31fff08 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) + static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) + { + struct tcp_sock *tp = tcp_sk(sk); ++ int room; ++ ++ room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; + + /* Check #1 */ +- if (tp->rcv_ssthresh < tp->window_clamp && +- (int)tp->rcv_ssthresh < tcp_space(sk) && +- !tcp_under_memory_pressure(sk)) { ++ if (room > 0 && !tcp_under_memory_pressure(sk)) { + int incr; + + /* Check #2. Increase window, if skb with such overhead +@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) + + if (incr) { + incr = max_t(int, incr, 2 * skb->len); +- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, +- tp->window_clamp); ++ tp->rcv_ssthresh += min(room, incr); + inet_csk(sk)->icsk_ack.quick |= 1; + } + } +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 0086acc16f3c..b6a97115a906 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2336,6 +2336,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, + + rcu_read_lock(); + from = rcu_dereference(rt6->from); ++ if (!from) { ++ rcu_read_unlock(); ++ return; ++ } + nrt6 = ip6_rt_cache_alloc(from, daddr, saddr); + if (nrt6) { + rt6_do_update_pmtu(nrt6, mtu); +diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h +index 3e0d5922a440..a9c1d6e3cdae 100644 +--- a/net/mac80211/driver-ops.h ++++ b/net/mac80211/driver-ops.h +@@ -1166,6 +1166,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local, + { + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); + ++ if (local->in_reconfig) ++ return; ++ + if (!check_sdata_in_driver(sdata)) + return; + +diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c +index ddfc52ac1f9b..c0d323b58e73 100644 +--- a/net/nfc/nci/hci.c ++++ b/net/nfc/nci/hci.c +@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, + create_info = (struct nci_hci_create_pipe_resp *)skb->data; + dest_gate = create_info->dest_gate; + new_pipe = create_info->pipe; ++ if (new_pipe >= NCI_HCI_MAX_PIPES) { ++ status = NCI_HCI_ANY_E_NOK; ++ goto exit; ++ } + + /* Save the new created pipe and bind with local gate, + * the description for skb->data[3] is destination gate id +@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, + goto exit; + } + delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; ++ if (delete_info->pipe >= NCI_HCI_MAX_PIPES) { ++ status = NCI_HCI_ANY_E_NOK; ++ goto exit; ++ } + + ndev->hci_dev->pipes[delete_info->pipe].gate = + NCI_HCI_INVALID_GATE; +diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c +index 73940293700d..7b5ce1343474 100644 +--- a/net/sched/sch_cake.c ++++ b/net/sched/sch_cake.c +@@ -1508,32 +1508,29 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) + return idx + (tin << 16); + } + +-static void cake_wash_diffserv(struct sk_buff *skb) +-{ +- switch (skb->protocol) { +- case 
htons(ETH_P_IP): +- ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); +- break; +- case htons(ETH_P_IPV6): +- ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); +- break; +- default: +- break; +- } +-} +- + static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) + { ++ int wlen = skb_network_offset(skb); + u8 dscp; + +- switch (skb->protocol) { ++ switch (tc_skb_protocol(skb)) { + case htons(ETH_P_IP): ++ wlen += sizeof(struct iphdr); ++ if (!pskb_may_pull(skb, wlen) || ++ skb_try_make_writable(skb, wlen)) ++ return 0; ++ + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + if (wash && dscp) + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + return dscp; + + case htons(ETH_P_IPV6): ++ wlen += sizeof(struct ipv6hdr); ++ if (!pskb_may_pull(skb, wlen) || ++ skb_try_make_writable(skb, wlen)) ++ return 0; ++ + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + if (wash && dscp) + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); +@@ -1553,25 +1550,27 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, + { + struct cake_sched_data *q = qdisc_priv(sch); + u32 tin; ++ u8 dscp; ++ ++ /* Tin selection: Default to diffserv-based selection, allow overriding ++ * using firewall marks or skb->priority. ++ */ ++ dscp = cake_handle_diffserv(skb, ++ q->rate_flags & CAKE_FLAG_WASH); + +- if (TC_H_MAJ(skb->priority) == sch->handle && +- TC_H_MIN(skb->priority) > 0 && +- TC_H_MIN(skb->priority) <= q->tin_cnt) { ++ if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) ++ tin = 0; ++ ++ else if (TC_H_MAJ(skb->priority) == sch->handle && ++ TC_H_MIN(skb->priority) > 0 && ++ TC_H_MIN(skb->priority) <= q->tin_cnt) + tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; + +- if (q->rate_flags & CAKE_FLAG_WASH) +- cake_wash_diffserv(skb); +- } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) { +- /* extract the Diffserv Precedence field, if it exists */ +- /* and clear DSCP bits if washing */ +- tin = q->tin_index[cake_handle_diffserv(skb, +- q->rate_flags & CAKE_FLAG_WASH)]; ++ else { ++ tin = q->tin_index[dscp]; ++ + if (unlikely(tin >= q->tin_cnt)) + tin = 0; +- } else { +- tin = 0; +- if (q->rate_flags & CAKE_FLAG_WASH) +- cake_wash_diffserv(skb); + } + + return &q->tins[tin]; +diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c +index da1a676860ca..0f4e42792878 100644 +--- a/net/strparser/strparser.c ++++ b/net/strparser/strparser.c +@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, + /* We are going to append to the frags_list of head. + * Need to unshare the frag_list. 
+ */ +- if (skb_has_frag_list(head)) { +- err = skb_unclone(head, GFP_ATOMIC); +- if (err) { +- STRP_STATS_INCR(strp->stats.mem_fail); +- desc->error = err; +- return 0; +- } ++ err = skb_unclone(head, GFP_ATOMIC); ++ if (err) { ++ STRP_STATS_INCR(strp->stats.mem_fail); ++ desc->error = err; ++ return 0; + } + + if (unlikely(skb_shinfo(head)->frag_list)) { +diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c +index bff241f03525..89993afe0fbd 100644 +--- a/net/tipc/name_table.c ++++ b/net/tipc/name_table.c +@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg, + for (; i < TIPC_NAMETBL_SIZE; i++) { + head = &tn->nametbl->services[i]; + +- if (*last_type) { ++ if (*last_type || ++ (!i && *last_key && (*last_lower == *last_key))) { + service = tipc_service_find(net, *last_type); + if (!service) + return -EPIPE; +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c +index d753e362d2d9..4b5ff3d44912 100644 +--- a/net/tls/tls_device.c ++++ b/net/tls/tls_device.c +@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock); + + static void tls_device_free_ctx(struct tls_context *ctx) + { +- if (ctx->tx_conf == TLS_HW) ++ if (ctx->tx_conf == TLS_HW) { + kfree(tls_offload_ctx_tx(ctx)); ++ kfree(ctx->tx.rec_seq); ++ kfree(ctx->tx.iv); ++ } + + if (ctx->rx_conf == TLS_HW) + kfree(tls_offload_ctx_rx(ctx)); +@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk) + } + EXPORT_SYMBOL(tls_device_sk_destruct); + ++void tls_device_free_resources_tx(struct sock *sk) ++{ ++ struct tls_context *tls_ctx = tls_get_ctx(sk); ++ ++ tls_free_partial_record(sk, tls_ctx); ++} ++ + static void tls_append_frag(struct tls_record_info *record, + struct page_frag *pfrag, + int size) +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index 78cb4a584080..96dbac91ac6e 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -220,6 +220,26 @@ int tls_push_pending_closed_record(struct sock *sk, + return tls_ctx->push_pending_record(sk, flags); + } + ++bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx) ++{ ++ struct scatterlist *sg; ++ ++ sg = ctx->partially_sent_record; ++ if (!sg) ++ return false; ++ ++ while (1) { ++ put_page(sg_page(sg)); ++ sk_mem_uncharge(sk, sg->length); ++ ++ if (sg_is_last(sg)) ++ break; ++ sg++; ++ } ++ ctx->partially_sent_record = NULL; ++ return true; ++} ++ + static void tls_write_space(struct sock *sk) + { + struct tls_context *ctx = tls_get_ctx(sk); +@@ -278,6 +298,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) + kfree(ctx->tx.rec_seq); + kfree(ctx->tx.iv); + tls_sw_free_resources_tx(sk); ++#ifdef CONFIG_TLS_DEVICE ++ } else if (ctx->tx_conf == TLS_HW) { ++ tls_device_free_resources_tx(sk); ++#endif + } + + if (ctx->rx_conf == TLS_SW) { +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index bf5b54b513bc..d2d4f7c0d4be 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -1804,20 +1804,7 @@ void tls_sw_free_resources_tx(struct sock *sk) + /* Free up un-sent records in tx_list. First, free + * the partially sent record if any at head of tx_list. 
+ */ +- if (tls_ctx->partially_sent_record) { +- struct scatterlist *sg = tls_ctx->partially_sent_record; +- +- while (1) { +- put_page(sg_page(sg)); +- sk_mem_uncharge(sk, sg->length); +- +- if (sg_is_last(sg)) +- break; +- sg++; +- } +- +- tls_ctx->partially_sent_record = NULL; +- ++ if (tls_free_partial_record(sk, tls_ctx)) { + rec = list_first_entry(&ctx->tx_list, + struct tls_rec, list); + list_del(&rec->list); +diff --git a/security/device_cgroup.c b/security/device_cgroup.c +index cd97929fac66..dc28914fa72e 100644 +--- a/security/device_cgroup.c ++++ b/security/device_cgroup.c +@@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root, + devcg->behavior == DEVCG_DEFAULT_ALLOW) { + rc = dev_exception_add(devcg, ex); + if (rc) +- break; ++ return rc; + } else { + /* + * in the other possible cases: +diff --git a/sound/core/info.c b/sound/core/info.c +index fe502bc5e6d2..679136fba730 100644 +--- a/sound/core/info.c ++++ b/sound/core/info.c +@@ -722,8 +722,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent) + INIT_LIST_HEAD(&entry->children); + INIT_LIST_HEAD(&entry->list); + entry->parent = parent; +- if (parent) ++ if (parent) { ++ mutex_lock(&parent->access); + list_add_tail(&entry->list, &parent->children); ++ mutex_unlock(&parent->access); ++ } + return entry; + } + +@@ -805,7 +808,12 @@ void snd_info_free_entry(struct snd_info_entry * entry) + list_for_each_entry_safe(p, n, &entry->children, list) + snd_info_free_entry(p); + +- list_del(&entry->list); ++ p = entry->parent; ++ if (p) { ++ mutex_lock(&p->access); ++ list_del(&entry->list); ++ mutex_unlock(&p->access); ++ } + kfree(entry->name); + if (entry->private_free) + entry->private_free(entry); +diff --git a/sound/core/init.c b/sound/core/init.c +index 4849c611c0fe..16b7cc7aa66b 100644 +--- a/sound/core/init.c ++++ b/sound/core/init.c +@@ -407,14 +407,7 @@ int snd_card_disconnect(struct snd_card *card) + card->shutdown = 1; + spin_unlock(&card->files_lock); + +- /* phase 1: disable fops (user space) operations for ALSA API */ +- mutex_lock(&snd_card_mutex); +- snd_cards[card->number] = NULL; +- clear_bit(card->number, snd_cards_lock); +- mutex_unlock(&snd_card_mutex); +- +- /* phase 2: replace file->f_op with special dummy operations */ +- ++ /* replace file->f_op with special dummy operations */ + spin_lock(&card->files_lock); + list_for_each_entry(mfile, &card->files_list, list) { + /* it's critical part, use endless loop */ +@@ -430,7 +423,7 @@ int snd_card_disconnect(struct snd_card *card) + } + spin_unlock(&card->files_lock); + +- /* phase 3: notify all connected devices about disconnection */ ++ /* notify all connected devices about disconnection */ + /* at this point, they cannot respond to any calls except release() */ + + #if IS_ENABLED(CONFIG_SND_MIXER_OSS) +@@ -446,6 +439,13 @@ int snd_card_disconnect(struct snd_card *card) + device_del(&card->card_dev); + card->registered = false; + } ++ ++ /* disable fops (user space) operations for ALSA API */ ++ mutex_lock(&snd_card_mutex); ++ snd_cards[card->number] = NULL; ++ clear_bit(card->number, snd_cards_lock); ++ mutex_unlock(&snd_card_mutex); ++ + #ifdef CONFIG_PM + wake_up(&card->power_sleep); + #endif +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 84fae0df59e9..f061167062bc 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -7247,6 +7247,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x12, 0x90a60140}, + {0x14, 0x90170150}, + 
{0x21, 0x02211020}), ++ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ++ {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, + {0x14, 0x90170110}, + {0x21, 0x02211020}), +@@ -7357,6 +7359,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x21, 0x0221101f}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC256_STANDARD_PINS), ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ++ {0x14, 0x90170110}, ++ {0x1b, 0x01011020}, ++ {0x21, 0x0221101f}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, + {0x14, 0x90170110}, + {0x1b, 0x90a70130}, diff --git a/patch/kernel/sunxi-dev/xxx-orangepizero-plus2-default-regulator.patch b/patch/kernel/sunxi-dev/xxx-orangepizero-plus2-default-regulator.patch index ad9deb048..ef3168241 100644 --- a/patch/kernel/sunxi-dev/xxx-orangepizero-plus2-default-regulator.patch +++ b/patch/kernel/sunxi-dev/xxx-orangepizero-plus2-default-regulator.patch @@ -13,15 +13,15 @@ index 50ad2e4fd..e1ee1cd09 100644 + regulator-type = "voltage"; + regulator-boot-on; + regulator-always-on; -+ regulator-min-microvolt = <1100000>; -+ regulator-max-microvolt = <1100000>; ++ regulator-min-microvolt = <1108475>; ++ regulator-max-microvolt = <1307810>; + regulator-ramp-delay = <50>; /* 4ms */ -+ ++ enable-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */ + gpios = <&r_pio 0 6 0>; /* PL6 */ + enable-active-high; + gpios-states = <0x1>; -+ states = <1100000 0x0 -+ 1100000 0x1>; ++ states = <1108475 0x0 ++ 1307810 0x1>; + }; +}; + diff --git a/patch/kernel/sunxi-next/xxx-orangepizero-plus2-default-regulator.patch b/patch/kernel/sunxi-next/xxx-orangepizero-plus2-default-regulator.patch index ad9deb048..ef3168241 100644 --- a/patch/kernel/sunxi-next/xxx-orangepizero-plus2-default-regulator.patch +++ b/patch/kernel/sunxi-next/xxx-orangepizero-plus2-default-regulator.patch @@ -13,15 +13,15 @@ index 50ad2e4fd..e1ee1cd09 100644 + regulator-type = "voltage"; + regulator-boot-on; + regulator-always-on; -+ regulator-min-microvolt = <1100000>; -+ regulator-max-microvolt = <1100000>; ++ regulator-min-microvolt = <1108475>; ++ regulator-max-microvolt = <1307810>; + regulator-ramp-delay = <50>; /* 4ms */ -+ ++ enable-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */ + gpios = <&r_pio 0 6 0>; /* PL6 */ + enable-active-high; + gpios-states = <0x1>; -+ states = <1100000 0x0 -+ 1100000 0x1>; ++ states = <1108475 0x0 ++ 1307810 0x1>; + }; +}; +