diff --git a/patch/kernel/archive/spacemit-6.6/patch-6.6.95-96.patch b/patch/kernel/archive/spacemit-6.6/patch-6.6.95-96.patch
new file mode 100644
index 000000000..ccc41e987
--- /dev/null
+++ b/patch/kernel/archive/spacemit-6.6/patch-6.6.95-96.patch
@@ -0,0 +1,5580 @@
+diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
+index 692aa05500fd53..6ba0325039be21 100644
+--- a/Documentation/devicetree/bindings/serial/8250.yaml
++++ b/Documentation/devicetree/bindings/serial/8250.yaml
+@@ -45,7 +45,7 @@ allOf:
+               - ns16550
+               - ns16550a
+     then:
+-      anyOf:
++      oneOf:
+         - required: [ clock-frequency ]
+         - required: [ clocks ]
+ 
+diff --git a/Makefile b/Makefile
+index 679dff5e165c07..038fc8e0982bdc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 95
++SUBLEVEL = 96
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+ 
+diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
+index 7f44e88d1f25bc..14a38cc67e0bc9 100644
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -10,6 +10,7 @@
+ #include <uapi/asm/ptrace.h>
+ 
+ #ifndef __ASSEMBLY__
++#include <linux/bitfield.h>
+ #include <linux/types.h>
+ 
+ struct pt_regs {
+@@ -35,8 +36,8 @@ struct svc_pt_regs {
+ 
+ #ifndef CONFIG_CPU_V7M
+ #define isa_mode(regs) \
+-	((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
+-	 (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
++	(FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
++	 FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
+ #else
+ #define isa_mode(regs) 1 /* Thumb */
+ #endif
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index ebad8c8b8c57dd..0476ce7700dfaa 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -639,7 +639,7 @@ SYM_CODE_START(stack_overflow)
+ 	stmg	%r0,%r7,__PT_R0(%r11)
+ 	stmg	%r8,%r9,__PT_PSW(%r11)
+ 	mvc	__PT_R8(64,%r11),0(%r14)
+-	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
++	mvc	__PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK
+ 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ 	lgr	%r2,%r11	# pass pointer to pt_regs
+ 	jg	kernel_stack_overflow
+diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
+index a1afe414ce4814..fb5b1e7c133d86 100644
+--- a/arch/um/drivers/ubd_user.c
++++ b/arch/um/drivers/ubd_user.c
+@@ -41,7 +41,7 @@ int start_io_thread(unsigned long sp, int *fd_out)
+ 	*fd_out = fds[1];
+ 
+ 	err = os_set_fd_block(*fd_out, 0);
+-	err = os_set_fd_block(kernel_fd, 0);
++	err |= os_set_fd_block(kernel_fd, 0);
+ 	if (err) {
+ 		printk("start_io_thread - failed to set nonblocking I/O.\n");
+ 		goto out_close;
+diff --git a/arch/um/include/asm/asm-prototypes.h b/arch/um/include/asm/asm-prototypes.h
+index 5898a26daa0dd4..408b31d591279d 100644
+--- a/arch/um/include/asm/asm-prototypes.h
++++ b/arch/um/include/asm/asm-prototypes.h
+@@ -1 +1,6 @@
+ #include <asm-generic/asm-prototypes.h>
++#include <asm/checksum.h>
++
++#ifdef CONFIG_UML_X86
++extern void cmpxchg8b_emu(void);
++#endif
+diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
+index 6d8ae86ae978fd..c16b80011adaac 100644
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -17,6 +17,122 @@
+ #include <os.h>
+ #include <skas.h>
+ 
++/*
++ * NOTE: UML does not have exception tables. As such, this is almost a copy
++ * of the code in mm/memory.c, only adjusting the logic to simply check whether
++ * we are coming from the kernel instead of doing an additional lookup in the
++ * exception table.
++ *
++ * We can do this simplification because we never get here if the exception was
++ * fixable.
++ */
++static inline bool get_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
++{
++	if (likely(mmap_read_trylock(mm)))
++		return true;
++
++	if (!is_user)
++		return false;
++
++	return !mmap_read_lock_killable(mm);
++}
++
++static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
++{
++	/*
++	 * We don't have this operation yet.
++	 *
++	 * It should be easy enough to do: it's basically a
++	 *	atomic_long_try_cmpxchg_acquire()
++	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
++	 * it also needs the proper lockdep magic etc.
++	 */
++	return false;
++}
++
++static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, bool is_user)
++{
++	mmap_read_unlock(mm);
++	if (!is_user)
++		return false;
++
++	return !mmap_write_lock_killable(mm);
++}
++
++/*
++ * Helper for page fault handling.
++ *
++ * This is kind of equivalend to "mmap_read_lock()" followed
++ * by "find_extend_vma()", except it's a lot more careful about
++ * the locking (and will drop the lock on failure).
++ *
++ * For example, if we have a kernel bug that causes a page
++ * fault, we don't want to just use mmap_read_lock() to get
++ * the mm lock, because that would deadlock if the bug were
++ * to happen while we're holding the mm lock for writing.
++ *
++ * So this checks the exception tables on kernel faults in
++ * order to only do this all for instructions that are actually
++ * expected to fault.
++ *
++ * We can also actually take the mm lock for writing if we
++ * need to extend the vma, which helps the VM layer a lot.
++ */
++static struct vm_area_struct *
++um_lock_mm_and_find_vma(struct mm_struct *mm,
++			unsigned long addr, bool is_user)
++{
++	struct vm_area_struct *vma;
++
++	if (!get_mmap_lock_carefully(mm, is_user))
++		return NULL;
++
++	vma = find_vma(mm, addr);
++	if (likely(vma && (vma->vm_start <= addr)))
++		return vma;
++
++	/*
++	 * Well, dang. We might still be successful, but only
++	 * if we can extend a vma to do so.
++	 */
++	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
++		mmap_read_unlock(mm);
++		return NULL;
++	}
++
++	/*
++	 * We can try to upgrade the mmap lock atomically,
++	 * in which case we can continue to use the vma
++	 * we already looked up.
++	 *
++	 * Otherwise we'll have to drop the mmap lock and
++	 * re-take it, and also look up the vma again,
++	 * re-checking it.
++	 */
++	if (!mmap_upgrade_trylock(mm)) {
++		if (!upgrade_mmap_lock_carefully(mm, is_user))
++			return NULL;
++
++		vma = find_vma(mm, addr);
++		if (!vma)
++			goto fail;
++		if (vma->vm_start <= addr)
++			goto success;
++		if (!(vma->vm_flags & VM_GROWSDOWN))
++			goto fail;
++	}
++
++	if (expand_stack_locked(vma, addr))
++		goto fail;
++
++success:
++	mmap_write_downgrade(mm);
++	return vma;
++
++fail:
++	mmap_write_unlock(mm);
++	return NULL;
++}
++
+ /*
+  * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
+  * segv().
+@@ -43,21 +159,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
+ 	if (is_user)
+ 		flags |= FAULT_FLAG_USER;
+ retry:
+-	mmap_read_lock(mm);
+-	vma = find_vma(mm, address);
+-	if (!vma)
+-		goto out;
+-	if (vma->vm_start <= address)
+-		goto good_area;
+-	if (!(vma->vm_flags & VM_GROWSDOWN))
+-		goto out;
+-	if (is_user && !ARCH_IS_STACKGROW(address))
+-		goto out;
+-	vma = expand_stack(mm, address);
++	vma = um_lock_mm_and_find_vma(mm, address, is_user);
+ 	if (!vma)
+ 		goto out_nosemaphore;
+ 
+-good_area:
+ 	*code_out = SEGV_ACCERR;
+ 	if (is_write) {
+ 		if (!(vma->vm_flags & VM_WRITE))
+diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
+index 472540aeabc235..08cd913cbd4e9a 100644
+--- a/arch/x86/tools/insn_decoder_test.c
++++ b/arch/x86/tools/insn_decoder_test.c
+@@ -10,8 +10,7 @@
+ #include <assert.h>
+ #include <unistd.h>
+ #include <stdarg.h>
+-
+-#define unlikely(cond) (cond)
++#include <linux/kallsyms.h>
+ 
+ #include <asm/insn.h>
+ #include <inat.c>
+@@ -106,7 +105,7 @@ static void parse_args(int argc, char **argv)
+ 	}
+ }
+ 
+-#define BUFSIZE 256
++#define BUFSIZE (256 + KSYM_NAME_LEN)
+ 
+ int main(int argc, char **argv)
+ {
+diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
+index b07824500363fa..ddc144657efad9 100644
+--- a/arch/x86/um/asm/checksum.h
++++ b/arch/x86/um/asm/checksum.h
+@@ -20,6 +20,9 @@
+  */
+ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+ 
++/* Do not call this directly. Declared for export type visibility. */
++extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
++
+ /**
+  * csum_fold - Fold and invert a 32bit checksum.
+  * sum: 32bit unfolded sum
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index d7f7f88009d7db..1728cae1e8409f 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1653,6 +1653,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
+ 	}
+ 	put_device(dev);
+ 
++	if (rc)
++		dev_err(port->uport_dev,
++			"failed to find %s:%s in target list of %s\n",
++			dev_name(&port->dev),
++			dev_name(port->parent_dport->dport_dev),
++			dev_name(&cxlsd->cxld.dev));
++
+ 	return rc;
+ }
+ 
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 7e3a67f9f0a654..aa39fcd389a942 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -354,7 +354,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
+ 		set_bit(h, evl->bmap);
+ 		h = (h + 1) % size;
+ 	}
+-	drain_workqueue(wq->wq);
++	if (wq->wq)
++		drain_workqueue(wq->wq);
++
+ 	mutex_unlock(&evl->lock);
+ }
+ 
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 0a3b2e22f23dbb..14c4c5031b556f 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2900,6 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ 		return -EINVAL;
+ 	}
+ 
++	xdev->common.directions |= chan->direction;
++
+ 	/* Request the interrupt */
+ 	chan->irq = of_irq_get(node, chan->tdest);
+ 	if (chan->irq < 0)
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 10119cf27ffde5..a8db2bde638401 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1475,7 +1475,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
+ 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
+ 		cs_mode |= CS_ODD_PRIMARY;
+ 
+-	/* Asymmetric dual-rank DIMM support. */
++	if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
++		cs_mode |= CS_EVEN_SECONDARY;
++
+ 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
+ 		cs_mode |= CS_ODD_SECONDARY;
+ 
+@@ -1496,12 +1498,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
+ 	return cs_mode;
+ }
+ 
+-static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
+-				  int csrow_nr, int dimm)
++static int calculate_cs_size(u32 mask, unsigned int cs_mode)
+ {
+-	u32 msb, weight, num_zero_bits;
+-	u32 addr_mask_deinterleaved;
+-	int size = 0;
++	int msb, weight, num_zero_bits;
++	u32 deinterleaved_mask;
++
++	if (!mask)
++		return 0;
+ 
+ 	/*
+ 	 * The number of zero bits in the mask is equal to the number of bits
+@@ -1514,19 +1517,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
+ 	 * without swapping with the most significant bit. This can be handled
+ 	 * by keeping the MSB where it is and ignoring the single zero bit.
+ 	 */
+-	msb = fls(addr_mask_orig) - 1;
+-	weight = hweight_long(addr_mask_orig);
++	msb = fls(mask) - 1;
++	weight = hweight_long(mask);
+ 	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
+ 
+ 	/* Take the number of zero bits off from the top of the mask. */
+-	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
++	deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
++	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
++
++	return (deinterleaved_mask >> 2) + 1;
++}
++
++static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
++				  unsigned int cs_mode, int csrow_nr, int dimm)
++{
++	int size;
+ 
+ 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+-	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
+-	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
++	edac_dbg(1, "  Primary AddrMask: 0x%x\n", addr_mask);
+ 
+ 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
+-	size = (addr_mask_deinterleaved >> 2) + 1;
++	size = calculate_cs_size(addr_mask, cs_mode);
++
++	edac_dbg(1, "  Secondary AddrMask: 0x%x\n", addr_mask_sec);
++	size += calculate_cs_size(addr_mask_sec, cs_mode);
+ 
+ 	/* Return size in MBs. */
+ 	return size >> 10;
+@@ -1535,8 +1549,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
+ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ 				    unsigned int cs_mode, int csrow_nr)
+ {
++	u32 addr_mask = 0, addr_mask_sec = 0;
+ 	int cs_mask_nr = csrow_nr;
+-	u32 addr_mask_orig;
+ 	int dimm, size = 0;
+ 
+ 	/* No Chip Selects are enabled. */
+@@ -1574,13 +1588,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ 	if (!pvt->flags.zn_regs_v2)
+ 		cs_mask_nr >>= 1;
+ 
+-	/* Asymmetric dual-rank DIMM support. */
+-	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
+-		addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+-	else
+-		addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
++	if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
++		addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
++
++	if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
++		addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+ 
+-	return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
++	return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
+ }
+ 
+ static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+@@ -3773,9 +3787,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err)
+ static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ 				    unsigned int cs_mode, int csrow_nr)
+ {
+-	u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
++	u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr];
++	u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr];
+ 
+-	return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
++	return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
+ }
+ 
+ static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 65d1e66a347d75..d1fd2e492909e5 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -1547,6 +1547,39 @@ static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
+ 	return ret;
+ }
+ 
++/**
++ * scmi_protocol_msg_check - Check protocol message attributes
++ *
++ * @ph: A reference to the protocol handle.
++ * @message_id: The ID of the message to check.
++ * @attributes: A parameter to optionally return the retrieved message
++ *		attributes, in case of Success.
++ *
++ * An helper to check protocol message attributes for a specific protocol
++ * and message pair.
++ *
++ * Return: 0 on SUCCESS
++ */
++static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
++				   u32 message_id, u32 *attributes)
++{
++	int ret;
++	struct scmi_xfer *t;
++
++	ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
++			    sizeof(__le32), 0, &t);
++	if (ret)
++		return ret;
++
++	put_unaligned_le32(message_id, t->tx.buf);
++	ret = do_xfer(ph, t);
++	if (!ret && attributes)
++		*attributes = get_unaligned_le32(t->rx.buf);
++	xfer_put(ph, t);
++
++	return ret;
++}
++
+ /**
+  * struct scmi_iterator - Iterator descriptor
+  * @msg: A reference to the message TX buffer; filled by @prepare_message with
+@@ -1688,6 +1721,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ 	int ret;
+ 	u32 flags;
+ 	u64 phys_addr;
++	u32 attributes;
+ 	u8 size;
+ 	void __iomem *addr;
+ 	struct scmi_xfer *t;
+@@ -1696,6 +1730,15 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ 	struct scmi_msg_resp_desc_fc *resp;
+ 	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ 
++	/* Check if the MSG_ID supports fastchannel */
++	ret = scmi_protocol_msg_check(ph, message_id, &attributes);
++	if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
++		dev_dbg(ph->dev,
++			"Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
++			pi->proto->id, message_id, domain, ret);
++		return;
++	}
++
+ 	if (!p_addr) {
+ 		ret = -EINVAL;
+ 		goto err_out;
+@@ -1824,6 +1867,7 @@ static const struct scmi_proto_helpers_ops helpers_ops = {
+ 	.extended_name_get = scmi_common_extended_name_get,
+ 	.iter_response_init = scmi_iterator_init,
+ 	.iter_response_run = scmi_iterator_run,
++	.protocol_msg_check = scmi_protocol_msg_check,
+ 	.fastchannel_init = scmi_common_fastchannel_init,
+ 	.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
+ };
+diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
+index 78e1a01eb656e3..095b14a2d0a3f6 100644
+--- a/drivers/firmware/arm_scmi/protocols.h
++++ b/drivers/firmware/arm_scmi/protocols.h
+@@ -29,6 +29,8 @@
+ #define PROTOCOL_REV_MAJOR(x)	((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
+ #define PROTOCOL_REV_MINOR(x)	((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))))
+ 
++#define MSG_SUPPORTS_FASTCHANNEL(x)	((x) & BIT(0))
++
+ enum scmi_common_cmd {
+ 	PROTOCOL_VERSION = 0x0,
+ 	PROTOCOL_ATTRIBUTES = 0x1,
+@@ -250,6 +252,8 @@ struct scmi_fc_info {
+  *	     provided in @ops.
+  * @iter_response_run: A common helper to trigger the run of a previously
+  *		       initialized iterator.
++ * @protocol_msg_check: A common helper to check is a specific protocol message
++ *			is supported.
+  * @fastchannel_init: A common helper used to initialize FC descriptors by
+  *		      gathering FC descriptions from the SCMI platform server.
+  * @fastchannel_db_ring: A common helper to ring a FC doorbell.
+@@ -262,6 +266,8 @@ struct scmi_proto_helpers_ops {
+ 				  unsigned int max_resources, u8 msg_id,
+ 				  size_t tx_size, void *priv);
+ 	int (*iter_response_run)(void *iter);
++	int (*protocol_msg_check)(const struct scmi_protocol_handle *ph,
++				  u32 message_id, u32 *attributes);
+ 	void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
+ 				 u8 describe_id, u32 message_id,
+ 				 u32 valid_size, u32 domain,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 963e106d32eed0..256cc15fc9b5a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -1890,7 +1890,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
+ 			continue;
+ 		}
+ 		job = to_amdgpu_job(s_job);
+-		if (preempted && (&job->hw_fence) == fence)
++		if (preempted && (&job->hw_fence.base) == fence)
+ 			/* mark the job as preempted */
+ 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f8058dd5356a13..200b59318759da 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5367,7 +5367,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ 	 *
+ 	 * job->base holds a reference to parent fence
+ 	 */
+-	if (job && dma_fence_is_signaled(&job->hw_fence)) {
++	if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ 		job_signaled = true;
+ 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ 		goto skip_hw_reset;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 7537f5aa76f0c0..017dd494d0a2f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -41,22 +41,6 @@
+ #include "amdgpu_trace.h"
+ #include "amdgpu_reset.h"
+ 
+-/*
+- * Fences mark an event in the GPUs pipeline and are used
+- * for GPU/CPU synchronization. When the fence is written,
+- * it is expected that all buffers associated with that fence
+- * are no longer in use by the associated ring on the GPU and
+- * that the relevant GPU caches have been flushed.
+- */
+-
+-struct amdgpu_fence {
+-	struct dma_fence base;
+-
+-	/* RB, DMA, etc. */
+-	struct amdgpu_ring		*ring;
+-	ktime_t				start_timestamp;
+-};
+-
+ static struct kmem_cache *amdgpu_fence_slab;
+ 
+ int amdgpu_fence_slab_init(void)
+@@ -153,12 +137,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
+ 		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
+ 		if (am_fence == NULL)
+ 			return -ENOMEM;
+-		fence = &am_fence->base;
+-		am_fence->ring = ring;
+ 	} else {
+ 		/* take use of job-embedded fence */
+-		fence = &job->hw_fence;
++		am_fence = &job->hw_fence;
+ 	}
++	fence = &am_fence->base;
++	am_fence->ring = ring;
+ 
+ 	seq = ++ring->fence_drv.sync_seq;
+ 	if (job && job->job_run_counter) {
+@@ -719,7 +703,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+ 			 * it right here or we won't be able to track them in fence_drv
+ 			 * and they will remain unsignaled during sa_bo free.
+ 			 */
+-			job = container_of(old, struct amdgpu_job, hw_fence);
++			job = container_of(old, struct amdgpu_job, hw_fence.base);
+ 			if (!job->base.s_fence && !dma_fence_is_signaled(old))
+ 				dma_fence_signal(old);
+ 			RCU_INIT_POINTER(*ptr, NULL);
+@@ -781,7 +765,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
+ 
+ static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+ {
+-	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
++	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
+ 
+ 	return (const char *)to_amdgpu_ring(job->base.sched)->name;
+ }
+@@ -811,7 +795,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
+  */
+ static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+ {
+-	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
++	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
+ 
+ 	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+ 		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
+@@ -846,7 +830,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
+ 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ 
+ 	/* free job if fence has a parent job */
+-	kfree(container_of(f, struct amdgpu_job, hw_fence));
++	kfree(container_of(f, struct amdgpu_job, hw_fence.base));
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 49a6b6b88843dd..e9adfc88a54ab1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -165,8 +165,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+ 	/* Check if any fences where initialized */
+ 	if (job->base.s_fence && job->base.s_fence->finished.ops)
+ 		f = &job->base.s_fence->finished;
+-	else if (job->hw_fence.ops)
+-		f = &job->hw_fence;
++	else if (job->hw_fence.base.ops)
++		f = &job->hw_fence.base;
+ 	else
+ 		f = NULL;
+ 
+@@ -183,10 +183,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+ 	amdgpu_sync_free(&job->explicit_sync);
+ 
+ 	/* only put the hw fence if has embedded fence */
+-	if (!job->hw_fence.ops)
++	if (!job->hw_fence.base.ops)
+ 		kfree(job);
+ 	else
+-		dma_fence_put(&job->hw_fence);
++		dma_fence_put(&job->hw_fence.base);
+ }
+ 
+ void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+@@ -215,10 +215,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ 	if (job->gang_submit != &job->base.s_fence->scheduled)
+ 		dma_fence_put(job->gang_submit);
+ 
+-	if (!job->hw_fence.ops)
++	if (!job->hw_fence.base.ops)
+ 		kfree(job);
+ 	else
+-		dma_fence_put(&job->hw_fence);
++		dma_fence_put(&job->hw_fence.base);
+ }
+ 
+ struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index a963a25ddd6209..65b6fbab544e5f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -48,7 +48,7 @@ struct amdgpu_job {
+ 	struct drm_sched_job	base;
+ 	struct amdgpu_vm	*vm;
+ 	struct amdgpu_sync	explicit_sync;
+-	struct dma_fence	hw_fence;
++	struct amdgpu_fence	hw_fence;
+ 	struct dma_fence	*gang_submit;
+ 	uint32_t		preamble_status;
+ 	uint32_t		preemption_status;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index e2ab303ad2708e..60f770b99c2c54 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -123,6 +123,22 @@ struct amdgpu_fence_driver {
+ 	struct dma_fence		**fences;
+ };
+ 
++/*
++ * Fences mark an event in the GPUs pipeline and are used
++ * for GPU/CPU synchronization. When the fence is written,
++ * it is expected that all buffers associated with that fence
++ * are no longer in use by the associated ring on the GPU and
++ * that the relevant GPU caches have been flushed.
++ */
++
++struct amdgpu_fence {
++	struct dma_fence base;
++
++	/* RB, DMA, etc. */
++	struct amdgpu_ring		*ring;
++	ktime_t				start_timestamp;
++};
++
+ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
+ 
+ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index bef7541770641c..e9d2fcdde0e1c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -28,6 +28,10 @@
+ #include "amdgpu.h"
+ #include "amdgpu_ucode.h"
+ 
++static const struct kicker_device kicker_device_list[] = {
++	{0x744B, 0x00},
++};
++
+ static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+ {
+ 	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+@@ -1268,6 +1272,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
+ 	return NULL;
+ }
+ 
++bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
++		if (adev->pdev->device == kicker_device_list[i].device &&
++		    adev->pdev->revision == kicker_device_list[i].revision)
++			return true;
++	}
++
++	return false;
++}
++
+ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
+ {
+ 	int maj, min, rev;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index b03321e7d2d893..4760092aafd723 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -536,6 +536,11 @@ struct amdgpu_firmware {
+ 	uint64_t fw_buf_mc;
+ };
+ 
++struct kicker_device{
++	unsigned short device;
++	u8 revision;
++};
++
+ void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
+@@ -562,5 +567,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
+ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
+ 
+ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
++bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index c7085a747b03b7..451c37d04e4567 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -435,7 +435,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
+ 	int r;
+ 
+ 	lpfn = (u64)place->lpfn << PAGE_SHIFT;
+-	if (!lpfn)
++	if (!lpfn || lpfn > man->size)
+ 		lpfn = man->size;
+ 
+ 	fpfn = (u64)place->fpfn << PAGE_SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 0f58be65132fc0..2b07c0000df6eb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -1287,6 +1287,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
+ 	user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ 	if (unlikely(user_gpu_id == -EINVAL)) {
+ 		WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
++		kfd_unref_process(p);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+index 1a03173e231337..18e82d0da75bcd 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+@@ -225,7 +225,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+ 
+ 		packet->bitfields2.engine_sel =
+ 			engine_sel__mes_map_queues__compute_vi;
+-		packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
++		packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
+ 		packet->bitfields2.extended_engine_sel =
+ 			extended_engine_sel__mes_map_queues__legacy_engine_sel;
+ 		packet->bitfields2.queue_type =
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index ff930a71e496a9..7f8f127e7722de 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
+ 	struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ 	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ 
++	if (!display)
++		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ 	mutex_lock(&psp->hdcp_context.mutex);
+ 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+ 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 5f58da6ebaadb4..27e660e92489f6 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1868,9 +1868,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
+ 
+ 	/*
+ 	 * Concurrent operations could possibly trigger a call to
+-	 * drm_connector_helper_funcs.get_modes by trying to read the
+-	 * display modes. Protect access to I/O registers by acquiring
+-	 * the I/O-register lock. Released in atomic_flush().
++	 * drm_connector_helper_funcs.get_modes by reading the display
++	 * modes. Protect access to registers by acquiring the modeset
++	 * lock.
+ 	 */
+ 	mutex_lock(&ast->ioregs_lock);
+ 	drm_atomic_helper_commit_tail_rpm(state);
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+index 7457d38622b0c7..89eed0668bfb24 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+@@ -568,15 +568,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
+ 	struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ 	unsigned long dsi_hss_hsa_hse_hbp;
+ 	unsigned int nlanes = output->dev->lanes;
++	int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
+ 	int ret;
+ 
+ 	ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
+ 	if (ret)
+ 		return ret;
+ 
+-	phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
+-					 mipi_dsi_pixel_format_to_bpp(output->dev->format),
+-					 nlanes, phy_cfg);
++	ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
++					       mipi_dsi_pixel_format_to_bpp(output->dev->format),
++					       nlanes, phy_cfg);
++	if (ret)
++		return ret;
+ 
+ 	ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
+ 	if (ret)
+@@ -680,6 +683,11 @@ static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+ 	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ 	struct cdns_dsi *dsi = input_to_dsi(input);
+ 
++	dsi->phy_initialized = false;
++	dsi->link_initialized = false;
++	phy_power_off(dsi->dphy);
++	phy_exit(dsi->dphy);
++
+ 	pm_runtime_put(dsi->base.dev);
+ }
+ 
+@@ -761,7 +769,7 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+ 	struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ 	unsigned long tx_byte_period;
+ 	struct cdns_dsi_cfg dsi_cfg;
+-	u32 tmp, reg_wakeup, div;
++	u32 tmp, reg_wakeup, div, status;
+ 	int nlanes;
+ 
+ 	if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
+@@ -778,6 +786,19 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+ 	cdns_dsi_hs_init(dsi);
+ 	cdns_dsi_init_link(dsi);
+ 
++	/*
++	 * Now that the DSI Link and DSI Phy are initialized,
++	 * wait for the CLK and Data Lanes to be ready.
++	 */
++	tmp = CLK_LANE_RDY;
++	for (int i = 0; i < nlanes; i++)
++		tmp |= DATA_LANE_RDY(i);
++
++	if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
++			       (tmp == (status & tmp)), 100, 500000))
++		dev_err(dsi->base.dev,
++			"Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
++
+ 	writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
+ 	       dsi->regs + VID_HSIZE1);
+ 	writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
+@@ -952,7 +973,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
+ 		bridge = drm_panel_bridge_add_typed(panel,
+ 						    DRM_MODE_CONNECTOR_DSI);
+ 	} else {
+-		bridge = of_drm_find_bridge(dev->dev.of_node);
++		bridge = of_drm_find_bridge(np);
+ 		if (!bridge)
+ 			bridge = ERR_PTR(-EINVAL);
+ 	}
+@@ -1152,7 +1173,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
+ 	clk_disable_unprepare(dsi->dsi_sys_clk);
+ 	clk_disable_unprepare(dsi->dsi_p_clk);
+ 	reset_control_assert(dsi->dsi_p_rst);
+-	dsi->link_initialized = false;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index bfbd3fee125671..002f8aaa509bc9 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -331,12 +331,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+ 	 * 200 ms. We'll assume that the panel driver will have the hardcoded
+ 	 * delay in its prepare and always disable HPD.
+ 	 *
+-	 * If HPD somehow makes sense on some future panel we'll have to
+-	 * change this to be conditional on someone specifying that HPD should
+-	 * be used.
++	 * For DisplayPort bridge type, we need HPD. So we use the bridge type
++	 * to conditionally disable HPD.
++	 * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
++	 * can be called before. So for DisplayPort, HPD will be enabled once
++	 * bridge type is set. We are using bridge type instead of "no-hpd"
++	 * property because it is not used properly in devicetree description
++	 * and hence is unreliable.
+ 	 */
+-	regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+-			   HPD_DISABLE);
++
++	if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
++		regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
++				   HPD_DISABLE);
+ 
+ 	pdata->comms_enabled = true;
+ 
+@@ -424,36 +430,8 @@ static int status_show(struct seq_file *s, void *data)
+ 
+ 	return 0;
+ }
+-
+ DEFINE_SHOW_ATTRIBUTE(status);
+ 
+-static void ti_sn65dsi86_debugfs_remove(void *data)
+-{
+-	debugfs_remove_recursive(data);
+-}
+-
+-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
+-{
+-	struct device *dev = pdata->dev;
+-	struct dentry *debugfs;
+-	int ret;
+-
+-	debugfs = debugfs_create_dir(dev_name(dev), NULL);
+-
+-	/*
+-	 * We might get an error back if debugfs wasn't enabled in the kernel
+-	 * so let's just silently return upon failure.
+-	 */
+-	if (IS_ERR_OR_NULL(debugfs))
+-		return;
+-
+-	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
+-	if (ret)
+-		return;
+-
+-	debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+-}
+-
+ /* -----------------------------------------------------------------------------
+  * Auxiliary Devices (*not* AUX)
+  */
+@@ -1201,9 +1179,14 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+ 	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ 	int val = 0;
+ 
+-	pm_runtime_get_sync(pdata->dev);
++	/*
++	 * Runtime reference is grabbed in ti_sn_bridge_hpd_enable()
++	 * as the chip won't report HPD just after being powered on.
++	 * HPD_DEBOUNCED_STATE reflects correct state only after the
++	 * debounce time (~100-400 ms).
++	 */
++
+ 	regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
+-	pm_runtime_put_autosuspend(pdata->dev);
+ 
+ 	return val & HPD_DEBOUNCED_STATE ? connector_status_connected
+ 					 : connector_status_disconnected;
+@@ -1217,6 +1200,35 @@ static struct edid *ti_sn_bridge_get_edid(struct drm_bridge *bridge,
+ 	return drm_get_edid(connector, &pdata->aux.ddc);
+ }
+ 
++static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
++{
++	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++	struct dentry *debugfs;
++
++	debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
++	debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
++}
++
++static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
++{
++	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++
++	/*
++	 * Device needs to be powered on before reading the HPD state
++	 * for reliable hpd detection in ti_sn_bridge_detect() due to
++	 * the high debounce time.
++ */ ++ ++ pm_runtime_get_sync(pdata->dev); ++} ++ ++static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge) ++{ ++ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); ++ ++ pm_runtime_put_autosuspend(pdata->dev); ++} ++ + static const struct drm_bridge_funcs ti_sn_bridge_funcs = { + .attach = ti_sn_bridge_attach, + .detach = ti_sn_bridge_detach, +@@ -1230,6 +1242,9 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = { + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, ++ .debugfs_init = ti_sn65dsi86_debugfs_init, ++ .hpd_enable = ti_sn_bridge_hpd_enable, ++ .hpd_disable = ti_sn_bridge_hpd_disable, + }; + + static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata, +@@ -1318,8 +1333,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, + pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort + ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP; + +- if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) +- pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; ++ if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) { ++ pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT | ++ DRM_BRIDGE_OP_HPD; ++ /* ++ * If comms were already enabled they would have been enabled ++ * with the wrong value of HPD_DISABLE. Update it now. Comms ++ * could be enabled if anyone is holding a pm_runtime reference ++ * (like if a GPIO is in use). Note that in most cases nobody ++ * is doing AUX channel xfers before the bridge is added so ++ * HPD doesn't _really_ matter then. The only exception is in ++ * the eDP case where the panel wants to read the EDID before ++ * the bridge is added. We always consistently have HPD disabled ++ * for eDP. ++ */ ++ mutex_lock(&pdata->comms_mutex); ++ if (pdata->comms_enabled) ++ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, ++ HPD_DISABLE, 0); ++ mutex_unlock(&pdata->comms_mutex); ++ }; + + drm_bridge_add(&pdata->bridge); + +@@ -1935,8 +1968,6 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) + if (ret) + return ret; + +- ti_sn65dsi86_debugfs_init(pdata); +- + /* + * Break ourselves up into a collection of aux devices. 
The only real + * motiviation here is to solve the chicken-and-egg problem of probe +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c +index 97e406d9ac06f4..a3bd396c9d829d 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c +@@ -34,6 +34,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job + *sched_job) + { + struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); ++ struct drm_gpu_scheduler *sched = sched_job->sched; + struct etnaviv_gpu *gpu = submit->gpu; + u32 dma_addr; + int change; +@@ -76,7 +77,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job + return DRM_GPU_SCHED_STAT_NOMINAL; + + out_no_timeout: +- list_add(&sched_job->list, &sched_job->sched->pending_list); ++ spin_lock(&sched->job_list_lock); ++ list_add(&sched_job->list, &sched->pending_list); ++ spin_unlock(&sched->job_list_lock); + return DRM_GPU_SCHED_STAT_NOMINAL; + } + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +index 5a687a3686bd53..023b2ea74c3601 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +@@ -2013,7 +2013,7 @@ static int eb_capture_stage(struct i915_execbuffer *eb) + continue; + + if (i915_gem_context_is_recoverable(eb->gem_context) && +- (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0))) ++ GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 10)) + return -EINVAL; + + for_each_batch_create_order(eb, j) { +diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c +index 33ab82c334a881..461aafc2ae9afa 100644 +--- a/drivers/gpu/drm/i915/i915_pmu.c ++++ b/drivers/gpu/drm/i915/i915_pmu.c +@@ -101,7 +101,7 @@ static unsigned int config_bit(const u64 config) + return other_bit(config); + } + +-static u32 config_mask(const u64 config) ++static __always_inline u32 config_mask(const u64 config) + { + unsigned int bit = config_bit(config); + +diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c +index 6970b0f7f457c8..2e1d5c3432728c 100644 +--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c ++++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c +@@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu) + priv->gpu_devfreq_config.downdifferential = 10; + + mutex_init(&df->lock); ++ df->suspended = true; + + ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, + DEV_PM_QOS_MIN_FREQUENCY, 0); +diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c +index 53130a50584ca0..eed3b8bed9e40f 100644 +--- a/drivers/gpu/drm/scheduler/sched_entity.c ++++ b/drivers/gpu/drm/scheduler/sched_entity.c +@@ -167,6 +167,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) + { + struct drm_sched_job *job = container_of(wrk, typeof(*job), work); + ++ drm_sched_fence_scheduled(job->s_fence, NULL); + drm_sched_fence_finished(job->s_fence, -ESRCH); + WARN_ON(job->s_fence->parent); + job->sched->ops->free_job(job); +diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c +index 13b182ab905fb0..980d85bc7f3745 100644 +--- a/drivers/gpu/drm/tegra/dc.c ++++ b/drivers/gpu/drm/tegra/dc.c +@@ -1320,10 +1320,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm, + if (wgrp->dc == dc->pipe) { + for (j = 0; j < wgrp->num_windows; j++) { + unsigned int index = wgrp->windows[j]; ++ enum drm_plane_type 
type; ++ ++ if (primary) ++ type = DRM_PLANE_TYPE_OVERLAY; ++ else ++ type = DRM_PLANE_TYPE_PRIMARY; + + plane = tegra_shared_plane_create(drm, dc, + wgrp->index, +- index); ++ index, type); + if (IS_ERR(plane)) + return plane; + +@@ -1331,10 +1337,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm, + * Choose the first shared plane owned by this + * head as the primary plane. + */ +- if (!primary) { +- plane->type = DRM_PLANE_TYPE_PRIMARY; ++ if (!primary) + primary = plane; +- } + } + } + } +@@ -1388,7 +1392,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc) + if (crtc->state) + tegra_crtc_atomic_destroy_state(crtc, crtc->state); + +- __drm_atomic_helper_crtc_reset(crtc, &state->base); ++ if (state) ++ __drm_atomic_helper_crtc_reset(crtc, &state->base); ++ else ++ __drm_atomic_helper_crtc_reset(crtc, NULL); + } + + static struct drm_crtc_state * +diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c +index 1af5f8318d9146..0f88cbb3331706 100644 +--- a/drivers/gpu/drm/tegra/hub.c ++++ b/drivers/gpu/drm/tegra/hub.c +@@ -756,9 +756,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = { + struct drm_plane *tegra_shared_plane_create(struct drm_device *drm, + struct tegra_dc *dc, + unsigned int wgrp, +- unsigned int index) ++ unsigned int index, ++ enum drm_plane_type type) + { +- enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY; + struct tegra_drm *tegra = drm->dev_private; + struct tegra_display_hub *hub = tegra->hub; + struct tegra_shared_plane *plane; +diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h +index 23c4b2115ed1e3..a66f18c4facc9d 100644 +--- a/drivers/gpu/drm/tegra/hub.h ++++ b/drivers/gpu/drm/tegra/hub.h +@@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub); + struct drm_plane *tegra_shared_plane_create(struct drm_device *drm, + struct tegra_dc *dc, + unsigned int wgrp, +- unsigned int index); ++ unsigned int index, ++ enum drm_plane_type type); + + int tegra_display_hub_atomic_check(struct drm_device *drm, + struct drm_atomic_state *state); +diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c +index 594bc472862fe6..b0361ef53c4635 100644 +--- a/drivers/gpu/drm/tiny/cirrus.c ++++ b/drivers/gpu/drm/tiny/cirrus.c +@@ -318,7 +318,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch) + /* Enable extended blanking and pitch bits, and enable full memory */ + cr1b = 0x22; + cr1b |= (pitch >> 7) & 0x10; +- cr1b |= (pitch >> 6) & 0x40; + wreg_crt(cirrus, 0x1b, cr1b); + + cirrus_set_start_address(cirrus, 0); +diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c +index 1506094a800983..ac357ab1aee544 100644 +--- a/drivers/gpu/drm/udl/udl_drv.c ++++ b/drivers/gpu/drm/udl/udl_drv.c +@@ -126,9 +126,9 @@ static void udl_usb_disconnect(struct usb_interface *interface) + { + struct drm_device *dev = usb_get_intfdata(interface); + ++ drm_dev_unplug(dev); + drm_kms_helper_poll_fini(dev); + udl_drop_usb(dev); +- drm_dev_unplug(dev); + } + + /* +diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c +index a4062f617ba202..ee65da98c7d5b5 100644 +--- a/drivers/hid/hid-lenovo.c ++++ b/drivers/hid/hid-lenovo.c +@@ -529,11 +529,14 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev) + + /* + * Tell the keyboard a driver understands it, and turn F7, F9, F11 into +- * regular keys ++ * regular keys (Compact only) + */ +- ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03); +- if (ret) +- 
hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret); ++ if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD || ++ hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) { ++ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03); ++ if (ret) ++ hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret); ++ } + + /* Switch middle button to native mode */ + ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01); +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c +index 5a72cf8d6944fa..52011503ff3bac 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -2012,14 +2012,18 @@ static int wacom_initialize_remotes(struct wacom *wacom) + + remote->remote_dir = kobject_create_and_add("wacom_remote", + &wacom->hdev->dev.kobj); +- if (!remote->remote_dir) ++ if (!remote->remote_dir) { ++ kfifo_free(&remote->remote_fifo); + return -ENOMEM; ++ } + + error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs); + + if (error) { + hid_err(wacom->hdev, + "cannot create sysfs group err: %d\n", error); ++ kfifo_free(&remote->remote_fifo); ++ kobject_put(remote->remote_dir); + return error; + } + +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c +index 2f4d09ce027a3f..3c6011a48dabe7 100644 +--- a/drivers/hv/channel_mgmt.c ++++ b/drivers/hv/channel_mgmt.c +@@ -120,7 +120,9 @@ const struct vmbus_device vmbus_devs[] = { + }, + + /* File copy */ +- { .dev_type = HV_FCOPY, ++ /* fcopy always uses 16KB ring buffer size and is working well for last many years */ ++ { .pref_ring_size = 0x4000, ++ .dev_type = HV_FCOPY, + HV_FCOPY_GUID, + .perf_device = false, + .allowed_in_isolated = false, +@@ -140,12 +142,19 @@ const struct vmbus_device vmbus_devs[] = { + .allowed_in_isolated = false, + }, + +- /* Unknown GUID */ +- { .dev_type = HV_UNKNOWN, ++ /* ++ * Unknown GUID ++ * 64 KB ring buffer + 4 KB header should be sufficient size for any Hyper-V device apart ++ * from HV_NIC and HV_SCSI. This case avoid the fallback for unknown devices to allocate ++ * much bigger (2 MB) of ring size. ++ */ ++ { .pref_ring_size = 0x11000, ++ .dev_type = HV_UNKNOWN, + .perf_device = false, + .allowed_in_isolated = false, + }, + }; ++EXPORT_SYMBOL_GPL(vmbus_devs); + + static const struct { + guid_t guid; +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index 787b1506864188..34b60009114a69 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -419,6 +419,11 @@ static inline bool hv_is_perf_channel(struct vmbus_channel *channel) + return vmbus_devs[channel->device_id].perf_device; + } + ++static inline size_t hv_dev_ring_size(struct vmbus_channel *channel) ++{ ++ return vmbus_devs[channel->device_id].pref_ring_size; ++} ++ + static inline bool hv_is_allocated_cpu(unsigned int cpu) + { + struct vmbus_channel *channel, *sc; +diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c +index fe7f6b1b09851e..e14be8ebaad30e 100644 +--- a/drivers/hwmon/pmbus/max34440.c ++++ b/drivers/hwmon/pmbus/max34440.c +@@ -34,16 +34,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 }; + /* + * The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT + * swapped from the standard pmbus spec addresses. ++ * For max34451, version MAX34451ETNA6+ and later has this issue fixed. 
+ */ + #define MAX34440_IOUT_OC_WARN_LIMIT 0x46 + #define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A + ++#define MAX34451ETNA6_MFR_REV 0x0012 ++ + #define MAX34451_MFR_CHANNEL_CONFIG 0xe4 + #define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f + + struct max34440_data { + int id; + struct pmbus_driver_info info; ++ u8 iout_oc_warn_limit; ++ u8 iout_oc_fault_limit; + }; + + #define to_max34440_data(x) container_of(x, struct max34440_data, info) +@@ -60,11 +65,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page, + switch (reg) { + case PMBUS_IOUT_OC_FAULT_LIMIT: + ret = pmbus_read_word_data(client, page, phase, +- MAX34440_IOUT_OC_FAULT_LIMIT); ++ data->iout_oc_fault_limit); + break; + case PMBUS_IOUT_OC_WARN_LIMIT: + ret = pmbus_read_word_data(client, page, phase, +- MAX34440_IOUT_OC_WARN_LIMIT); ++ data->iout_oc_warn_limit); + break; + case PMBUS_VIRT_READ_VOUT_MIN: + ret = pmbus_read_word_data(client, page, phase, +@@ -133,11 +138,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page, + + switch (reg) { + case PMBUS_IOUT_OC_FAULT_LIMIT: +- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT, ++ ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit, + word); + break; + case PMBUS_IOUT_OC_WARN_LIMIT: +- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT, ++ ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit, + word); + break; + case PMBUS_VIRT_RESET_POUT_HISTORY: +@@ -235,6 +240,25 @@ static int max34451_set_supported_funcs(struct i2c_client *client, + */ + + int page, rv; ++ bool max34451_na6 = false; ++ ++ rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION); ++ if (rv < 0) ++ return rv; ++ ++ if (rv >= MAX34451ETNA6_MFR_REV) { ++ max34451_na6 = true; ++ data->info.format[PSC_VOLTAGE_IN] = direct; ++ data->info.format[PSC_CURRENT_IN] = direct; ++ data->info.m[PSC_VOLTAGE_IN] = 1; ++ data->info.b[PSC_VOLTAGE_IN] = 0; ++ data->info.R[PSC_VOLTAGE_IN] = 3; ++ data->info.m[PSC_CURRENT_IN] = 1; ++ data->info.b[PSC_CURRENT_IN] = 0; ++ data->info.R[PSC_CURRENT_IN] = 2; ++ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT; ++ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT; ++ } + + for (page = 0; page < 16; page++) { + rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); +@@ -251,16 +275,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client, + case 0x20: + data->info.func[page] = PMBUS_HAVE_VOUT | + PMBUS_HAVE_STATUS_VOUT; ++ ++ if (max34451_na6) ++ data->info.func[page] |= PMBUS_HAVE_VIN | ++ PMBUS_HAVE_STATUS_INPUT; + break; + case 0x21: + data->info.func[page] = PMBUS_HAVE_VOUT; ++ ++ if (max34451_na6) ++ data->info.func[page] |= PMBUS_HAVE_VIN; + break; + case 0x22: + data->info.func[page] = PMBUS_HAVE_IOUT | + PMBUS_HAVE_STATUS_IOUT; ++ ++ if (max34451_na6) ++ data->info.func[page] |= PMBUS_HAVE_IIN | ++ PMBUS_HAVE_STATUS_INPUT; + break; + case 0x23: + data->info.func[page] = PMBUS_HAVE_IOUT; ++ ++ if (max34451_na6) ++ data->info.func[page] |= PMBUS_HAVE_IIN; + break; + default: + break; +@@ -494,6 +532,8 @@ static int max34440_probe(struct i2c_client *client) + return -ENOMEM; + data->id = i2c_match_id(max34440_id, client)->driver_data; + data->info = max34440_info[data->id]; ++ data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT; ++ data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT; + + if (data->id == max34451) { + rv = max34451_set_supported_funcs(client, data); +diff --git a/drivers/hwtracing/coresight/coresight-core.c 
b/drivers/hwtracing/coresight/coresight-core.c +index 783e259c376121..3b57851869eaae 100644 +--- a/drivers/hwtracing/coresight/coresight-core.c ++++ b/drivers/hwtracing/coresight/coresight-core.c +@@ -135,7 +135,8 @@ coresight_find_out_connection(struct coresight_device *src_dev, + + static inline u32 coresight_read_claim_tags(struct coresight_device *csdev) + { +- return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR); ++ return FIELD_GET(CORESIGHT_CLAIM_MASK, ++ csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR)); + } + + static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev) +diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h +index 30c051055e54b3..b758a42ed8c734 100644 +--- a/drivers/hwtracing/coresight/coresight-priv.h ++++ b/drivers/hwtracing/coresight/coresight-priv.h +@@ -32,6 +32,7 @@ + * Coresight device CLAIM protocol. + * See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore. + */ ++#define CORESIGHT_CLAIM_MASK GENMASK(1, 0) + #define CORESIGHT_CLAIM_SELF_HOSTED BIT(1) + + #define TIMEOUT_US 100 +diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c +index 66dfa211e736b1..8e4cf9028b2342 100644 +--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c ++++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c +@@ -111,6 +111,11 @@ static u32 osif_func(struct i2c_adapter *adapter) + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + } + ++/* prevent invalid 0-length usb_control_msg */ ++static const struct i2c_adapter_quirks osif_quirks = { ++ .flags = I2C_AQ_NO_ZERO_LEN_READ, ++}; ++ + static const struct i2c_algorithm osif_algorithm = { + .master_xfer = osif_xfer, + .functionality = osif_func, +@@ -143,6 +148,7 @@ static int osif_probe(struct usb_interface *interface, + + priv->adapter.owner = THIS_MODULE; + priv->adapter.class = I2C_CLASS_HWMON; ++ priv->adapter.quirks = &osif_quirks; + priv->adapter.algo = &osif_algorithm; + priv->adapter.algo_data = priv; + snprintf(priv->adapter.name, sizeof(priv->adapter.name), +diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c +index 1bffe36c40ad89..d984b90b352701 100644 +--- a/drivers/i2c/busses/i2c-tiny-usb.c ++++ b/drivers/i2c/busses/i2c-tiny-usb.c +@@ -140,6 +140,11 @@ static u32 usb_func(struct i2c_adapter *adapter) + return ret; + } + ++/* prevent invalid 0-length usb_control_msg */ ++static const struct i2c_adapter_quirks usb_quirks = { ++ .flags = I2C_AQ_NO_ZERO_LEN_READ, ++}; ++ + /* This is the actual algorithm we define */ + static const struct i2c_algorithm usb_algorithm = { + .master_xfer = usb_xfer, +@@ -248,6 +253,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, + /* setup i2c adapter description */ + dev->adapter.owner = THIS_MODULE; + dev->adapter.class = I2C_CLASS_HWMON; ++ dev->adapter.quirks = &usb_quirks; + dev->adapter.algo = &usb_algorithm; + dev->adapter.algo_data = dev; + snprintf(dev->adapter.name, sizeof(dev->adapter.name), +diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c +index 7e21928707437b..533667eefe419c 100644 +--- a/drivers/iio/adc/ad_sigma_delta.c ++++ b/drivers/iio/adc/ad_sigma_delta.c +@@ -476,6 +476,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p) + * byte set to zero. 
*/ + ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]); + break; ++ ++ default: ++ dev_err_ratelimited(&indio_dev->dev, "Unsupported reg_size: %u\n", reg_size); ++ goto irq_handled; + } + + /* +diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c +index ef1d0349f4247d..a1c694199c9896 100644 +--- a/drivers/iio/pressure/zpa2326.c ++++ b/drivers/iio/pressure/zpa2326.c +@@ -582,7 +582,7 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev, + struct { + u32 pressure; + u16 temperature; +- u64 timestamp; ++ aligned_s64 timestamp; + } sample; + int err; + +diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c +index ec62a48116135c..e0785935f4ba67 100644 +--- a/drivers/leds/led-class-multicolor.c ++++ b/drivers/leds/led-class-multicolor.c +@@ -61,7 +61,8 @@ static ssize_t multi_intensity_store(struct device *dev, + for (i = 0; i < mcled_cdev->num_colors; i++) + mcled_cdev->subled_info[i].intensity = intensity_value[i]; + +- led_set_brightness(led_cdev, led_cdev->brightness); ++ if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags)) ++ led_set_brightness(led_cdev, led_cdev->brightness); + ret = size; + err_out: + mutex_unlock(&led_cdev->led_access); +diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c +index f13d705f7861af..cb59b4dbad6269 100644 +--- a/drivers/mailbox/mailbox.c ++++ b/drivers/mailbox/mailbox.c +@@ -500,8 +500,8 @@ void mbox_free_channel(struct mbox_chan *chan) + if (chan->txdone_method == TXDONE_BY_ACK) + chan->txdone_method = TXDONE_BY_POLL; + +- module_put(chan->mbox->dev->driver->owner); + spin_unlock_irqrestore(&chan->lock, flags); ++ module_put(chan->mbox->dev->driver->owner); + } + EXPORT_SYMBOL_GPL(mbox_free_channel); + +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index 8440b56e385d5c..b9dfebaa9eae55 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -1739,7 +1739,12 @@ static void cache_set_flush(struct closure *cl) + mutex_unlock(&b->write_lock); + } + +- if (ca->alloc_thread) ++ /* ++ * If the register_cache_set() call to bch_cache_set_alloc() failed, ++ * ca has not been assigned a value and return error. ++ * So we need check ca is not NULL during bch_cache_set_unregister(). ++ */ ++ if (ca && ca->alloc_thread) + kthread_stop(ca->alloc_thread); + + if (c->journal.cur) { +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 385e24f55ec002..f23edd79df45e5 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -2378,7 +2378,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) + */ + sb_retrieve_failed_devices(sb, failed_devices); + rdev_for_each(r, mddev) { +- if (test_bit(Journal, &rdev->flags) || ++ if (test_bit(Journal, &r->flags) || + !r->sb_page) + continue; + sb2 = page_address(r->sb_page); +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c +index 8317e07b326d0d..21decb97bc050b 100644 +--- a/drivers/md/md-bitmap.c ++++ b/drivers/md/md-bitmap.c +@@ -589,7 +589,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap) + * is a good choice? We choose COUNTER_MAX / 2 arbitrarily. 
+ */ + write_behind = bitmap->mddev->bitmap_info.max_write_behind; +- if (write_behind > COUNTER_MAX) ++ if (write_behind > COUNTER_MAX / 2) + write_behind = COUNTER_MAX / 2; + sb->write_behind = cpu_to_le32(write_behind); + bitmap->mddev->bitmap_info.max_write_behind = write_behind; +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c +index 59e21746f55073..bd90d8bacd5ef1 100644 +--- a/drivers/media/usb/uvc/uvc_ctrl.c ++++ b/drivers/media/usb/uvc/uvc_ctrl.c +@@ -1801,7 +1801,7 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev, + unsigned int processed_ctrls = 0; + struct uvc_control *ctrl; + unsigned int i; +- int ret; ++ int ret = 0; + + if (entity == NULL) + return 0; +@@ -1830,8 +1830,6 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev, + dev->intfnum, ctrl->info.selector, + uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), + ctrl->info.size); +- else +- ret = 0; + + if (!ret) + processed_ctrls++; +@@ -1843,17 +1841,25 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev, + + ctrl->dirty = 0; + +- if (ret < 0) { ++ if (!rollback && handle && ++ ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) ++ uvc_ctrl_set_handle(handle, ctrl, handle); ++ ++ if (ret < 0 && !rollback) { + if (err_ctrl) + *err_ctrl = ctrl; +- return ret; ++ /* ++ * If we fail to set a control, we need to rollback ++ * the next ones. ++ */ ++ rollback = 1; + } + +- if (!rollback && handle && +- ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) +- uvc_ctrl_set_handle(handle, ctrl, handle); + } + ++ if (ret) ++ return ret; ++ + return processed_ctrls; + } + +@@ -1884,7 +1890,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback, + struct uvc_video_chain *chain = handle->chain; + struct uvc_control *err_ctrl; + struct uvc_entity *entity; +- int ret = 0; ++ int ret_out = 0; ++ int ret; + + /* Find the control. */ + list_for_each_entry(entity, &chain->entities, chain) { +@@ -1895,17 +1902,23 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback, + ctrls->error_idx = + uvc_ctrl_find_ctrl_idx(entity, ctrls, + err_ctrl); +- goto done; ++ /* ++ * When we fail to commit an entity, we need to ++ * restore the UVC_CTRL_DATA_BACKUP for all the ++ * controls in the other entities, otherwise our cache ++ * and the hardware will be out of sync. 
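The md-bitmap hunk above fixes a clamp that tested against the wrong bound: values between COUNTER_MAX / 2 and COUNTER_MAX passed the old `> COUNTER_MAX` check and were never reduced. A tiny demonstration of the difference (COUNTER_MAX is a made-up value here, not the kernel's):

    #include <stdio.h>

    #define COUNTER_MAX 100 /* illustrative only */

    int main(void)
    {
        unsigned int v;

        v = 80;                    /* in (COUNTER_MAX/2, COUNTER_MAX] */
        if (v > COUNTER_MAX)       /* old check: never true for 80 */
            v = COUNTER_MAX / 2;
        printf("old clamp: %u\n", v);   /* 80 slips through */

        v = 80;
        if (v > COUNTER_MAX / 2)   /* fixed: compare against the bound used */
            v = COUNTER_MAX / 2;
        printf("new clamp: %u\n", v);   /* 50 */
        return 0;
    }
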
++ */ ++ rollback = 1; ++ ++ ret_out = ret; + } else if (ret > 0 && !rollback) { + uvc_ctrl_send_events(handle, entity, + ctrls->controls, ctrls->count); + } + } + +- ret = 0; +-done: + mutex_unlock(&chain->ctrl_mutex); +- return ret; ++ return ret_out; + } + + int uvc_ctrl_get(struct uvc_video_chain *chain, +diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c +index 1f4f5002595c0a..17672eeb1732a2 100644 +--- a/drivers/mfd/max14577.c ++++ b/drivers/mfd/max14577.c +@@ -463,6 +463,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c) + { + struct max14577 *max14577 = i2c_get_clientdata(i2c); + ++ device_init_wakeup(max14577->dev, false); + mfd_remove_devices(max14577->dev); + regmap_del_irq_chip(max14577->irq, max14577->irq_data); + if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836) +diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c +index 88dcac8148922c..71fbe31542e562 100644 +--- a/drivers/misc/tps6594-pfsm.c ++++ b/drivers/misc/tps6594-pfsm.c +@@ -260,6 +260,9 @@ static int tps6594_pfsm_probe(struct platform_device *pdev) + pfsm->miscdev.minor = MISC_DYNAMIC_MINOR; + pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x", + tps->chip_id, tps->reg); ++ if (!pfsm->miscdev.name) ++ return -ENOMEM; ++ + pfsm->miscdev.fops = &tps6594_pfsm_fops; + pfsm->miscdev.parent = dev->parent; + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h +index 1619943fb2637a..4e8881b479e487 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h ++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h +@@ -485,7 +485,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg) + tmp = ioread32(reg + 4); + } while (high != tmp); + +- return le64_to_cpu((__le64)high << 32 | low); ++ return (u64)high << 32 | low; + } + #endif + +diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c +index c019fe964eceaf..97c6b4d2763433 100644 +--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c ++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c +@@ -2368,7 +2368,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring) + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, +- .pool_size = rx_ring->size, ++ .pool_size = rx_ring->count, + .nid = dev_to_node(rx_ring->dev), + .dev = rx_ring->dev, + .dma_dir = DMA_FROM_DEVICE, +diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c +index 4ce31f9f069475..5cf050e562b734 100644 +--- a/drivers/nvme/host/ioctl.c ++++ b/drivers/nvme/host/ioctl.c +@@ -526,16 +526,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, + pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64); + + /* +- * For iopoll, complete it directly. +- * Otherwise, move the completion to task work. ++ * IOPOLL could potentially complete this request directly, but ++ * if multiple rings are polling on the same queue, then it's possible ++ * for one ring to find completions for another ring. Punting the ++ * completion via task_work will always direct it to the right ++ * location, rather than potentially complete requests for ringA ++ * under iopoll invocations from ringB. 
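The enetc_hw.h hunk above drops a bogus le64_to_cpu() on a value assembled from two ioread32() results, which are already in CPU byte order. The surrounding retry loop is the standard torn-read guard for a 64-bit counter exposed as two 32-bit registers; a self-contained sketch of that idiom:

    #include <stdint.h>
    #include <stdio.h>

    /* Read a 64-bit counter exposed as two 32-bit words: re-read the high
     * word until it is stable across the low-word read, so a carry between
     * the two reads cannot produce a torn value. */
    static uint64_t read_split64(const volatile uint32_t *lo,
                                 const volatile uint32_t *hi)
    {
        uint32_t low, high, tmp;

        high = *hi;
        do {
            tmp = high;
            low = *lo;
            high = *hi;
        } while (high != tmp);

        /* Plain integer math; no endianness conversion is needed. */
        return (uint64_t)high << 32 | low;
    }

    int main(void)
    {
        uint32_t lo = 0x89abcdefu, hi = 0x01234567u;

        printf("%016llx\n", (unsigned long long)read_split64(&lo, &hi));
        return 0;
    }
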
+ */ +- if (blk_rq_is_poll(req)) { +- WRITE_ONCE(ioucmd->cookie, NULL); +- nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED); +- } else { +- io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); +- } +- ++ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); + return RQ_END_IO_FREE; + } + +diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c +index 2b60d20dfdf59d..717af1b757f0a5 100644 +--- a/drivers/pci/controller/dwc/pcie-designware.c ++++ b/drivers/pci/controller/dwc/pcie-designware.c +@@ -748,22 +748,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes) + /* Set link width speed control register */ + lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK; ++ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES; + switch (num_lanes) { + case 1: + plc |= PORT_LINK_MODE_1_LANES; +- lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES; + break; + case 2: + plc |= PORT_LINK_MODE_2_LANES; +- lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES; + break; + case 4: + plc |= PORT_LINK_MODE_4_LANES; +- lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES; + break; + case 8: + plc |= PORT_LINK_MODE_8_LANES; +- lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES; + break; + default: + dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes); +diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c +index 7e6bd63a6425e6..8dfea64a51e0f8 100644 +--- a/drivers/pci/controller/pcie-apple.c ++++ b/drivers/pci/controller/pcie-apple.c +@@ -585,6 +585,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, + list_add_tail(&port->entry, &pcie->ports); + init_completion(&pcie->event); + ++ /* In the success path, we keep a reference to np around */ ++ of_node_get(np); ++ + ret = apple_pcie_port_register_irqs(port); + WARN_ON(ret); + +@@ -764,7 +767,6 @@ static int apple_pcie_init(struct pci_config_window *cfg) + { + struct device *dev = cfg->parent; + struct platform_device *platform = to_platform_device(dev); +- struct device_node *of_port; + struct apple_pcie *pcie; + int ret; + +@@ -787,11 +789,10 @@ static int apple_pcie_init(struct pci_config_window *cfg) + if (ret) + return ret; + +- for_each_child_of_node(dev->of_node, of_port) { ++ for_each_available_child_of_node_scoped(dev->of_node, of_port) { + ret = apple_pcie_setup_port(pcie, of_port); + if (ret) { + dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret); +- of_node_put(of_port); + return ret; + } + } +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig +index 07eea525091b08..1fd57eb867662a 100644 +--- a/drivers/platform/x86/Kconfig ++++ b/drivers/platform/x86/Kconfig +@@ -466,6 +466,7 @@ config LENOVO_YMC + tristate "Lenovo Yoga Tablet Mode Control" + depends on ACPI_WMI + depends on INPUT ++ depends on IDEAPAD_LAPTOP + select INPUT_SPARSEKMAP + help + This driver maps the Tablet Mode Control switch to SW_TABLET_MODE input +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c +index 50013af0537c36..e84fcb444d872a 100644 +--- a/drivers/platform/x86/ideapad-laptop.c ++++ b/drivers/platform/x86/ideapad-laptop.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -21,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -86,6 +88,34 @@ enum { + SALS_FNLOCK_OFF = 0xf, + }; + ++enum { ++ VPCCMD_R_VPC1 = 0x10, ++ VPCCMD_R_BL_MAX, ++ VPCCMD_R_BL, ++ VPCCMD_W_BL, ++ VPCCMD_R_WIFI, ++ VPCCMD_W_WIFI, ++ 
VPCCMD_R_BT, ++ VPCCMD_W_BT, ++ VPCCMD_R_BL_POWER, ++ VPCCMD_R_NOVO, ++ VPCCMD_R_VPC2, ++ VPCCMD_R_TOUCHPAD, ++ VPCCMD_W_TOUCHPAD, ++ VPCCMD_R_CAMERA, ++ VPCCMD_W_CAMERA, ++ VPCCMD_R_3G, ++ VPCCMD_W_3G, ++ VPCCMD_R_ODD, /* 0x21 */ ++ VPCCMD_W_FAN, ++ VPCCMD_R_RF, ++ VPCCMD_W_RF, ++ VPCCMD_W_YMC = 0x2A, ++ VPCCMD_R_FAN = 0x2B, ++ VPCCMD_R_SPECIAL_BUTTONS = 0x31, ++ VPCCMD_W_BL_POWER = 0x33, ++}; ++ + /* + * These correspond to the number of supported states - 1 + * Future keyboard types may need a new system, if there's a collision +@@ -145,6 +175,7 @@ struct ideapad_private { + bool touchpad_ctrl_via_ec : 1; + bool ctrl_ps2_aux_port : 1; + bool usb_charging : 1; ++ bool ymc_ec_trigger : 1; + } features; + struct { + bool initialized; +@@ -188,6 +219,12 @@ MODULE_PARM_DESC(touchpad_ctrl_via_ec, + "Enable registering a 'touchpad' sysfs-attribute which can be used to manually " + "tell the EC to enable/disable the touchpad. This may not work on all models."); + ++static bool ymc_ec_trigger __read_mostly; ++module_param(ymc_ec_trigger, bool, 0444); ++MODULE_PARM_DESC(ymc_ec_trigger, ++ "Enable EC triggering work-around to force emitting tablet mode events. " ++ "If you need this please report this to: platform-driver-x86@vger.kernel.org"); ++ + /* + * shared data + */ +@@ -227,6 +264,21 @@ static void ideapad_shared_exit(struct ideapad_private *priv) + /* + * ACPI Helpers + */ ++#define IDEAPAD_EC_TIMEOUT 200 /* in ms */ ++ ++/* ++ * Some models (e.g., ThinkBook since 2024) have a low tolerance for being ++ * polled too frequently. Doing so may break the state machine in the EC, ++ * resulting in a hard shutdown. ++ * ++ * It is also observed that frequent polls may disturb the ongoing operation ++ * and notably delay the availability of EC response. ++ * ++ * These values are used as the delay before the first poll and the interval ++ * between subsequent polls to solve the above issues. 
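The timing rationale above is implemented by the read_ec_data()/write_ec_cmd() helpers that follow: sleep first, then poll, and give up after a deadline. A userspace sketch of the same bounded-poll shape; ec_busy() is a hypothetical stand-in for the VPCR status read:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define EC_TIMEOUT_MS 200
    #define POLL_MIN_US   150

    static int polls;

    /* Hypothetical status read; reports busy for the first few polls. */
    static int ec_busy(bool *busy)
    {
        *busy = ++polls < 4;
        return 0;
    }

    static long elapsed_ms(const struct timespec *start)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000 +
               (now.tv_nsec - start->tv_nsec) / 1000000;
    }

    static int wait_ec_idle(void)
    {
        struct timespec start;
        bool busy;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
            /* Sleep before polling so the EC is never hit back to back. */
            usleep(POLL_MIN_US);
            if (ec_busy(&busy))
                return -EIO;
            if (!busy)
                return 0;
        } while (elapsed_ms(&start) <= EC_TIMEOUT_MS);

        return -ETIMEDOUT;
    }

    int main(void)
    {
        printf("wait_ec_idle() = %d after %d polls\n", wait_ec_idle(), polls);
        return 0;
    }
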
++ */ ++#define IDEAPAD_EC_POLL_MIN_US 150 ++#define IDEAPAD_EC_POLL_MAX_US 300 + + static int eval_int(acpi_handle handle, const char *name, unsigned long *res) + { +@@ -242,6 +294,29 @@ static int eval_int(acpi_handle handle, const char *name, unsigned long *res) + return 0; + } + ++static int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg, ++ unsigned long *res) ++{ ++ struct acpi_object_list params; ++ unsigned long long result; ++ union acpi_object in_obj; ++ acpi_status status; ++ ++ params.count = 1; ++ params.pointer = &in_obj; ++ in_obj.type = ACPI_TYPE_INTEGER; ++ in_obj.integer.value = arg; ++ ++ status = acpi_evaluate_integer(handle, (char *)name, ¶ms, &result); ++ if (ACPI_FAILURE(status)) ++ return -EIO; ++ ++ if (res) ++ *res = result; ++ ++ return 0; ++} ++ + static int exec_simple_method(acpi_handle handle, const char *name, unsigned long arg) + { + acpi_status status = acpi_execute_simple_method(handle, (char *)name, arg); +@@ -284,6 +359,89 @@ static int eval_dytc(acpi_handle handle, unsigned long cmd, unsigned long *res) + return eval_int_with_arg(handle, "DYTC", cmd, res); + } + ++static int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res) ++{ ++ return eval_int_with_arg(handle, "VPCR", cmd, res); ++} ++ ++static int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data) ++{ ++ struct acpi_object_list params; ++ union acpi_object in_obj[2]; ++ acpi_status status; ++ ++ params.count = 2; ++ params.pointer = in_obj; ++ in_obj[0].type = ACPI_TYPE_INTEGER; ++ in_obj[0].integer.value = cmd; ++ in_obj[1].type = ACPI_TYPE_INTEGER; ++ in_obj[1].integer.value = data; ++ ++ status = acpi_evaluate_object(handle, "VPCW", ¶ms, NULL); ++ if (ACPI_FAILURE(status)) ++ return -EIO; ++ ++ return 0; ++} ++ ++static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data) ++{ ++ unsigned long end_jiffies, val; ++ int err; ++ ++ err = eval_vpcw(handle, 1, cmd); ++ if (err) ++ return err; ++ ++ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; ++ ++ while (time_before(jiffies, end_jiffies)) { ++ usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US); ++ ++ err = eval_vpcr(handle, 1, &val); ++ if (err) ++ return err; ++ ++ if (val == 0) ++ return eval_vpcr(handle, 0, data); ++ } ++ ++ acpi_handle_err(handle, "timeout in %s\n", __func__); ++ ++ return -ETIMEDOUT; ++} ++ ++static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data) ++{ ++ unsigned long end_jiffies, val; ++ int err; ++ ++ err = eval_vpcw(handle, 0, data); ++ if (err) ++ return err; ++ ++ err = eval_vpcw(handle, 1, cmd); ++ if (err) ++ return err; ++ ++ end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; ++ ++ while (time_before(jiffies, end_jiffies)) { ++ usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US); ++ ++ err = eval_vpcr(handle, 1, &val); ++ if (err) ++ return err; ++ ++ if (val == 0) ++ return 0; ++ } ++ ++ acpi_handle_err(handle, "timeout in %s\n", __func__); ++ ++ return -ETIMEDOUT; ++} ++ + /* + * debugfs + */ +@@ -1501,6 +1659,79 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_ + priv->r_touchpad_val = value; + } + ++static const struct dmi_system_id ymc_ec_trigger_quirk_dmi_table[] = { ++ { ++ /* Lenovo Yoga 7 14ARB7 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), ++ }, ++ }, ++ { ++ /* Lenovo Yoga 7 14ACN6 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ 
DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), ++ }, ++ }, ++ { } ++}; ++ ++static void ideapad_laptop_trigger_ec(void) ++{ ++ struct ideapad_private *priv; ++ int ret; ++ ++ guard(mutex)(&ideapad_shared_mutex); ++ ++ priv = ideapad_shared; ++ if (!priv) ++ return; ++ ++ if (!priv->features.ymc_ec_trigger) ++ return; ++ ++ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_YMC, 1); ++ if (ret) ++ dev_warn(&priv->platform_device->dev, "Could not write YMC: %d\n", ret); ++} ++ ++static int ideapad_laptop_nb_notify(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ switch (action) { ++ case IDEAPAD_LAPTOP_YMC_EVENT: ++ ideapad_laptop_trigger_ec(); ++ break; ++ } ++ ++ return 0; ++} ++ ++static struct notifier_block ideapad_laptop_notifier = { ++ .notifier_call = ideapad_laptop_nb_notify, ++}; ++ ++static BLOCKING_NOTIFIER_HEAD(ideapad_laptop_chain_head); ++ ++int ideapad_laptop_register_notifier(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_register(&ideapad_laptop_chain_head, nb); ++} ++EXPORT_SYMBOL_NS_GPL(ideapad_laptop_register_notifier, IDEAPAD_LAPTOP); ++ ++int ideapad_laptop_unregister_notifier(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_unregister(&ideapad_laptop_chain_head, nb); ++} ++EXPORT_SYMBOL_NS_GPL(ideapad_laptop_unregister_notifier, IDEAPAD_LAPTOP); ++ ++void ideapad_laptop_call_notifier(unsigned long action, void *data) ++{ ++ blocking_notifier_call_chain(&ideapad_laptop_chain_head, action, data); ++} ++EXPORT_SYMBOL_NS_GPL(ideapad_laptop_call_notifier, IDEAPAD_LAPTOP); ++ + static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) + { + struct ideapad_private *priv = data; +@@ -1637,6 +1868,8 @@ static void ideapad_check_features(struct ideapad_private *priv) + priv->features.ctrl_ps2_aux_port = + ctrl_ps2_aux_port || dmi_check_system(ctrl_ps2_aux_port_list); + priv->features.touchpad_ctrl_via_ec = touchpad_ctrl_via_ec; ++ priv->features.ymc_ec_trigger = ++ ymc_ec_trigger || dmi_check_system(ymc_ec_trigger_quirk_dmi_table); + + if (!read_ec_data(handle, VPCCMD_R_FAN, &val)) + priv->features.fan_mode = true; +@@ -1872,6 +2105,8 @@ static int ideapad_acpi_add(struct platform_device *pdev) + if (err) + goto shared_init_failed; + ++ ideapad_laptop_register_notifier(&ideapad_laptop_notifier); ++ + return 0; + + shared_init_failed: +@@ -1903,6 +2138,8 @@ static void ideapad_acpi_remove(struct platform_device *pdev) + struct ideapad_private *priv = dev_get_drvdata(&pdev->dev); + int i; + ++ ideapad_laptop_unregister_notifier(&ideapad_laptop_notifier); ++ + ideapad_shared_exit(priv); + + acpi_remove_notify_handler(priv->adev->handle, +diff --git a/drivers/platform/x86/ideapad-laptop.h b/drivers/platform/x86/ideapad-laptop.h +index 4498a96de59769..1e52f2aa0aac38 100644 +--- a/drivers/platform/x86/ideapad-laptop.h ++++ b/drivers/platform/x86/ideapad-laptop.h +@@ -9,144 +9,14 @@ + #ifndef _IDEAPAD_LAPTOP_H_ + #define _IDEAPAD_LAPTOP_H_ + +-#include +-#include +-#include ++#include + +-enum { +- VPCCMD_R_VPC1 = 0x10, +- VPCCMD_R_BL_MAX, +- VPCCMD_R_BL, +- VPCCMD_W_BL, +- VPCCMD_R_WIFI, +- VPCCMD_W_WIFI, +- VPCCMD_R_BT, +- VPCCMD_W_BT, +- VPCCMD_R_BL_POWER, +- VPCCMD_R_NOVO, +- VPCCMD_R_VPC2, +- VPCCMD_R_TOUCHPAD, +- VPCCMD_W_TOUCHPAD, +- VPCCMD_R_CAMERA, +- VPCCMD_W_CAMERA, +- VPCCMD_R_3G, +- VPCCMD_W_3G, +- VPCCMD_R_ODD, /* 0x21 */ +- VPCCMD_W_FAN, +- VPCCMD_R_RF, +- VPCCMD_W_RF, +- VPCCMD_W_YMC = 0x2A, +- VPCCMD_R_FAN = 0x2B, +- VPCCMD_R_SPECIAL_BUTTONS = 0x31, +- VPCCMD_W_BL_POWER = 0x33, ++enum 
ideapad_laptop_notifier_actions { ++ IDEAPAD_LAPTOP_YMC_EVENT, + }; + +-static inline int eval_int_with_arg(acpi_handle handle, const char *name, unsigned long arg, unsigned long *res) +-{ +- struct acpi_object_list params; +- unsigned long long result; +- union acpi_object in_obj; +- acpi_status status; ++int ideapad_laptop_register_notifier(struct notifier_block *nb); ++int ideapad_laptop_unregister_notifier(struct notifier_block *nb); ++void ideapad_laptop_call_notifier(unsigned long action, void *data); + +- params.count = 1; +- params.pointer = &in_obj; +- in_obj.type = ACPI_TYPE_INTEGER; +- in_obj.integer.value = arg; +- +- status = acpi_evaluate_integer(handle, (char *)name, ¶ms, &result); +- if (ACPI_FAILURE(status)) +- return -EIO; +- +- if (res) +- *res = result; +- +- return 0; +-} +- +-static inline int eval_vpcr(acpi_handle handle, unsigned long cmd, unsigned long *res) +-{ +- return eval_int_with_arg(handle, "VPCR", cmd, res); +-} +- +-static inline int eval_vpcw(acpi_handle handle, unsigned long cmd, unsigned long data) +-{ +- struct acpi_object_list params; +- union acpi_object in_obj[2]; +- acpi_status status; +- +- params.count = 2; +- params.pointer = in_obj; +- in_obj[0].type = ACPI_TYPE_INTEGER; +- in_obj[0].integer.value = cmd; +- in_obj[1].type = ACPI_TYPE_INTEGER; +- in_obj[1].integer.value = data; +- +- status = acpi_evaluate_object(handle, "VPCW", ¶ms, NULL); +- if (ACPI_FAILURE(status)) +- return -EIO; +- +- return 0; +-} +- +-#define IDEAPAD_EC_TIMEOUT 200 /* in ms */ +- +-static inline int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *data) +-{ +- unsigned long end_jiffies, val; +- int err; +- +- err = eval_vpcw(handle, 1, cmd); +- if (err) +- return err; +- +- end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; +- +- while (time_before(jiffies, end_jiffies)) { +- schedule(); +- +- err = eval_vpcr(handle, 1, &val); +- if (err) +- return err; +- +- if (val == 0) +- return eval_vpcr(handle, 0, data); +- } +- +- acpi_handle_err(handle, "timeout in %s\n", __func__); +- +- return -ETIMEDOUT; +-} +- +-static inline int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long data) +-{ +- unsigned long end_jiffies, val; +- int err; +- +- err = eval_vpcw(handle, 0, data); +- if (err) +- return err; +- +- err = eval_vpcw(handle, 1, cmd); +- if (err) +- return err; +- +- end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1; +- +- while (time_before(jiffies, end_jiffies)) { +- schedule(); +- +- err = eval_vpcr(handle, 1, &val); +- if (err) +- return err; +- +- if (val == 0) +- return 0; +- } +- +- acpi_handle_err(handle, "timeout in %s\n", __func__); +- +- return -ETIMEDOUT; +-} +- +-#undef IDEAPAD_EC_TIMEOUT + #endif /* !_IDEAPAD_LAPTOP_H_ */ +diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c +index ef2c267ab485cd..bd9f95404c7cb0 100644 +--- a/drivers/platform/x86/lenovo-ymc.c ++++ b/drivers/platform/x86/lenovo-ymc.c +@@ -20,32 +20,10 @@ + #define LENOVO_YMC_QUERY_INSTANCE 0 + #define LENOVO_YMC_QUERY_METHOD 0x01 + +-static bool ec_trigger __read_mostly; +-module_param(ec_trigger, bool, 0444); +-MODULE_PARM_DESC(ec_trigger, "Enable EC triggering work-around to force emitting tablet mode events"); +- + static bool force; + module_param(force, bool, 0444); + MODULE_PARM_DESC(force, "Force loading on boards without a convertible DMI chassis-type"); + +-static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = { +- { +- /* Lenovo Yoga 7 14ARB7 */ +- .matches = { +- 
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), +- DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), +- }, +- }, +- { +- /* Lenovo Yoga 7 14ACN6 */ +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), +- DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), +- }, +- }, +- { } +-}; +- + static const struct dmi_system_id allowed_chasis_types_dmi_table[] = { + { + .matches = { +@@ -62,21 +40,8 @@ static const struct dmi_system_id allowed_chasis_types_dmi_table[] = { + + struct lenovo_ymc_private { + struct input_dev *input_dev; +- struct acpi_device *ec_acpi_dev; + }; + +-static void lenovo_ymc_trigger_ec(struct wmi_device *wdev, struct lenovo_ymc_private *priv) +-{ +- int err; +- +- if (!priv->ec_acpi_dev) +- return; +- +- err = write_ec_cmd(priv->ec_acpi_dev->handle, VPCCMD_W_YMC, 1); +- if (err) +- dev_warn(&wdev->dev, "Could not write YMC: %d\n", err); +-} +- + static const struct key_entry lenovo_ymc_keymap[] = { + /* Ignore the uninitialized state */ + { KE_IGNORE, 0x00 }, +@@ -127,11 +92,9 @@ static void lenovo_ymc_notify(struct wmi_device *wdev, union acpi_object *data) + + free_obj: + kfree(obj); +- lenovo_ymc_trigger_ec(wdev, priv); ++ ideapad_laptop_call_notifier(IDEAPAD_LAPTOP_YMC_EVENT, &code); + } + +-static void acpi_dev_put_helper(void *p) { acpi_dev_put(p); } +- + static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) + { + struct lenovo_ymc_private *priv; +@@ -145,29 +108,10 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) + return -ENODEV; + } + +- ec_trigger |= dmi_check_system(ec_trigger_quirk_dmi_table); +- + priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + +- if (ec_trigger) { +- pr_debug("Lenovo YMC enable EC triggering.\n"); +- priv->ec_acpi_dev = acpi_dev_get_first_match_dev("VPC2004", NULL, -1); +- +- if (!priv->ec_acpi_dev) { +- dev_err(&wdev->dev, "Could not find EC ACPI device.\n"); +- return -ENODEV; +- } +- err = devm_add_action_or_reset(&wdev->dev, +- acpi_dev_put_helper, priv->ec_acpi_dev); +- if (err) { +- dev_err(&wdev->dev, +- "Could not clean up EC ACPI device: %d\n", err); +- return err; +- } +- } +- + input_dev = devm_input_allocate_device(&wdev->dev); + if (!input_dev) + return -ENOMEM; +@@ -194,7 +138,6 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) + dev_set_drvdata(&wdev->dev, priv); + + /* Report the state for the first time on probe */ +- lenovo_ymc_trigger_ec(wdev, priv); + lenovo_ymc_notify(wdev, NULL); + return 0; + } +@@ -219,3 +162,4 @@ module_wmi_driver(lenovo_ymc_driver); + MODULE_AUTHOR("Gergo Koteles "); + MODULE_DESCRIPTION("Lenovo Yoga Mode Control driver"); + MODULE_LICENSE("GPL"); ++MODULE_IMPORT_NS(IDEAPAD_LAPTOP); +diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c +index 70fcb5c40cfe32..21fc6151af79f5 100644 +--- a/drivers/s390/crypto/pkey_api.c ++++ b/drivers/s390/crypto/pkey_api.c +@@ -1333,7 +1333,7 @@ static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns) + if (!uapqns || nr_apqns == 0) + return NULL; + +- return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn)); ++ return memdup_array_user(uapqns, nr_apqns, sizeof(struct pkey_apqn)); + } + + static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index dd3630b09aa241..54f66142e14a82 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -5908,7 +5908,11 @@ 
megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) + const struct cpumask *mask; + + if (instance->perf_mode == MR_BALANCED_PERF_MODE) { +- mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); ++ int nid = dev_to_node(&instance->pdev->dev); ++ ++ if (nid == NUMA_NO_NODE) ++ nid = 0; ++ mask = cpumask_of_node(nid); + + for (i = 0; i < instance->low_latency_index_start; i++) { + irq = pci_irq_vector(instance->pdev, i); +diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c +index bf9b816637d02e..9285a683324f4f 100644 +--- a/drivers/spi/spi-cadence-quadspi.c ++++ b/drivers/spi/spi-cadence-quadspi.c +@@ -1868,6 +1868,13 @@ static int cqspi_probe(struct platform_device *pdev) + goto probe_setup_failed; + } + ++ pm_runtime_enable(dev); ++ ++ if (cqspi->rx_chan) { ++ dma_release_channel(cqspi->rx_chan); ++ goto probe_setup_failed; ++ } ++ + ret = spi_register_controller(host); + if (ret) { + dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret); +@@ -1877,6 +1884,7 @@ static int cqspi_probe(struct platform_device *pdev) + return 0; + probe_setup_failed: + cqspi_controller_enable(cqspi, 0); ++ pm_runtime_disable(dev); + probe_reset_failed: + if (cqspi->is_jh7110) + cqspi_jh7110_disable_clk(pdev, cqspi); +@@ -1898,7 +1906,8 @@ static void cqspi_remove(struct platform_device *pdev) + if (cqspi->rx_chan) + dma_release_channel(cqspi->rx_chan); + +- clk_disable_unprepare(cqspi->clk); ++ if (pm_runtime_get_sync(&pdev->dev) >= 0) ++ clk_disable(cqspi->clk); + + if (cqspi->is_jh7110) + cqspi_jh7110_disable_clk(pdev, cqspi); +diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c +index 7ecdaa2eeaf33e..83fdee444c1c03 100644 +--- a/drivers/staging/rtl8723bs/core/rtw_security.c ++++ b/drivers/staging/rtl8723bs/core/rtw_security.c +@@ -869,29 +869,21 @@ static signed int aes_cipher(u8 *key, uint hdrlen, + num_blocks, payload_index; + + u8 pn_vector[6]; +- u8 mic_iv[16]; +- u8 mic_header1[16]; +- u8 mic_header2[16]; +- u8 ctr_preload[16]; ++ u8 mic_iv[16] = {}; ++ u8 mic_header1[16] = {}; ++ u8 mic_header2[16] = {}; ++ u8 ctr_preload[16] = {}; + + /* Intermediate Buffers */ +- u8 chain_buffer[16]; +- u8 aes_out[16]; +- u8 padded_buffer[16]; ++ u8 chain_buffer[16] = {}; ++ u8 aes_out[16] = {}; ++ u8 padded_buffer[16] = {}; + u8 mic[8]; + uint frtype = GetFrameType(pframe); + uint frsubtype = GetFrameSubType(pframe); + + frsubtype = frsubtype>>4; + +- memset((void *)mic_iv, 0, 16); +- memset((void *)mic_header1, 0, 16); +- memset((void *)mic_header2, 0, 16); +- memset((void *)ctr_preload, 0, 16); +- memset((void *)chain_buffer, 0, 16); +- memset((void *)aes_out, 0, 16); +- memset((void *)padded_buffer, 0, 16); +- + if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN)) + a4_exists = 0; + else +@@ -1081,15 +1073,15 @@ static signed int aes_decipher(u8 *key, uint hdrlen, + num_blocks, payload_index; + signed int res = _SUCCESS; + u8 pn_vector[6]; +- u8 mic_iv[16]; +- u8 mic_header1[16]; +- u8 mic_header2[16]; +- u8 ctr_preload[16]; ++ u8 mic_iv[16] = {}; ++ u8 mic_header1[16] = {}; ++ u8 mic_header2[16] = {}; ++ u8 ctr_preload[16] = {}; + + /* Intermediate Buffers */ +- u8 chain_buffer[16]; +- u8 aes_out[16]; +- u8 padded_buffer[16]; ++ u8 chain_buffer[16] = {}; ++ u8 aes_out[16] = {}; ++ u8 padded_buffer[16] = {}; + u8 mic[8]; + + uint frtype = GetFrameType(pframe); +@@ -1097,14 +1089,6 @@ static signed int aes_decipher(u8 *key, uint hdrlen, + + frsubtype = frsubtype>>4; + +- 
memset((void *)mic_iv, 0, 16); +- memset((void *)mic_header1, 0, 16); +- memset((void *)mic_header2, 0, 16); +- memset((void *)ctr_preload, 0, 16); +- memset((void *)chain_buffer, 0, 16); +- memset((void *)aes_out, 0, 16); +- memset((void *)padded_buffer, 0, 16); +- + /* start to decrypt the payload */ + + num_blocks = (plen-8) / 16; /* plen including LLC, payload_length and mic) */ +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c +index 04809b781f45be..60d48d857b1c00 100644 +--- a/drivers/tty/serial/imx.c ++++ b/drivers/tty/serial/imx.c +@@ -234,6 +234,7 @@ struct imx_port { + enum imx_tx_state tx_state; + struct hrtimer trigger_start_tx; + struct hrtimer trigger_stop_tx; ++ unsigned int rxtl; + }; + + struct imx_port_ucrs { +@@ -1337,6 +1338,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport) + + #define TXTL_DEFAULT 8 + #define RXTL_DEFAULT 8 /* 8 characters or aging timer */ ++#define RXTL_CONSOLE_DEFAULT 1 + #define TXTL_DMA 8 /* DMA burst setting */ + #define RXTL_DMA 9 /* DMA burst setting */ + +@@ -1449,7 +1451,7 @@ static void imx_uart_disable_dma(struct imx_port *sport) + ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN); + imx_uart_writel(sport, ucr1, UCR1); + +- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); ++ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); + + sport->dma_is_enabled = 0; + } +@@ -1474,7 +1476,12 @@ static int imx_uart_startup(struct uart_port *port) + return retval; + } + +- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); ++ if (uart_console(&sport->port)) ++ sport->rxtl = RXTL_CONSOLE_DEFAULT; ++ else ++ sport->rxtl = RXTL_DEFAULT; ++ ++ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); + + /* disable the DREN bit (Data Ready interrupt enable) before + * requesting IRQs +@@ -1887,7 +1894,7 @@ static int imx_uart_poll_init(struct uart_port *port) + if (retval) + clk_disable_unprepare(sport->clk_ipg); + +- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); ++ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); + + spin_lock_irqsave(&sport->port.lock, flags); + +@@ -1979,7 +1986,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio + /* If the receiver trigger is 0, set it to a default value */ + ufcr = imx_uart_readl(sport, UFCR); + if ((ufcr & UFCR_RXTL_MASK) == 0) +- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); ++ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); + imx_uart_start_rx(port); + } + +@@ -2164,7 +2171,7 @@ imx_uart_console_setup(struct console *co, char *options) + else + imx_uart_console_get_options(sport, &baud, &parity, &bits); + +- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); ++ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl); + + retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); + +diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c +index b225a78f6175f3..9f39bafa7fa968 100644 +--- a/drivers/tty/serial/uartlite.c ++++ b/drivers/tty/serial/uartlite.c +@@ -872,16 +872,6 @@ static int ulite_probe(struct platform_device *pdev) + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + +- if (!ulite_uart_driver.state) { +- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n"); +- ret = uart_register_driver(&ulite_uart_driver); +- if (ret < 0) { +- dev_err(&pdev->dev, "Failed to register driver\n"); +- clk_disable_unprepare(pdata->clk); +- return ret; +- } +- } +- + ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata); + + 
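The rtl8723bs hunks above replace seven memset() calls with empty initializers, so the buffers are zeroed at the point of declaration and can never be observed uninitialized. The kernel builds as GNU C, where `= {}` is accepted; the portable C spelling of the same thing is `= {0}`:

    #include <stdio.h>

    int main(void)
    {
        /* Zeroed at declaration; replaces memset(mic_iv, 0, 16) et al. */
        unsigned char mic_iv[16] = {0};
        unsigned int i, sum = 0;

        for (i = 0; i < sizeof(mic_iv); i++)
            sum += mic_iv[i];
        printf("sum = %u\n", sum);  /* always 0 */
        return 0;
    }
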
pm_runtime_mark_last_busy(&pdev->dev); +@@ -922,16 +912,25 @@ static struct platform_driver ulite_platform_driver = { + + static int __init ulite_init(void) + { ++ int ret; ++ ++ pr_debug("uartlite: calling uart_register_driver()\n"); ++ ret = uart_register_driver(&ulite_uart_driver); ++ if (ret) ++ return ret; + + pr_debug("uartlite: calling platform_driver_register()\n"); +- return platform_driver_register(&ulite_platform_driver); ++ ret = platform_driver_register(&ulite_platform_driver); ++ if (ret) ++ uart_unregister_driver(&ulite_uart_driver); ++ ++ return ret; + } + + static void __exit ulite_exit(void) + { + platform_driver_unregister(&ulite_platform_driver); +- if (ulite_uart_driver.state) +- uart_unregister_driver(&ulite_uart_driver); ++ uart_unregister_driver(&ulite_uart_driver); + } + + module_init(ulite_init); +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 6bd1a7785e888c..c5ec7306aa7130 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -962,7 +962,7 @@ void redraw_screen(struct vc_data *vc, int is_switch) + } + + if (redraw) { +- int update; ++ bool update; + int old_was_color = vc->vc_can_do_color; + + set_origin(vc); +@@ -999,7 +999,7 @@ int vc_cons_allocated(unsigned int i) + return (i < MAX_NR_CONSOLES && vc_cons[i].d); + } + +-static void visual_init(struct vc_data *vc, int num, int init) ++static void visual_init(struct vc_data *vc, int num, bool init) + { + /* ++Geert: vc->vc_sw->con_init determines console size */ + if (vc->vc_sw) +@@ -1083,7 +1083,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ + vc->port.ops = &vc_port_ops; + INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); + +- visual_init(vc, currcons, 1); ++ visual_init(vc, currcons, true); + + if (!*vc->uni_pagedict_loc) + con_set_default_unimap(vc); +@@ -1582,7 +1582,7 @@ static void csi_X(struct vc_data *vc, unsigned int vpar) + vc_uniscr_clear_line(vc, vc->state.x, count); + scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count); + if (con_should_update(vc)) +- vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, 1, count); ++ vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, count); + vc->vc_need_wrap = 0; + } + +@@ -3474,7 +3474,7 @@ static int __init con_init(void) + vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT); + INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); + tty_port_init(&vc->port); +- visual_init(vc, currcons, 1); ++ visual_init(vc, currcons, true); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. 
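The uartlite rework above moves uart_register_driver() out of probe and into module init, with the usual unwind rule: if the second registration fails, the first is rolled back. The shape of that pattern as a runnable stand-alone sketch, where register_a()/register_b() are stand-ins for uart_register_driver() and platform_driver_register():

    #include <stdio.h>

    static int register_a(void)    { puts("A registered");   return 0; }
    static void unregister_a(void) { puts("A unregistered"); }
    static int register_b(void)    { puts("B failed");       return -1; }

    static int example_init(void)
    {
        int ret;

        ret = register_a();
        if (ret)
            return ret;

        ret = register_b();
        if (ret)
            unregister_a();   /* unwind the earlier registration */

        return ret;
    }

    int main(void)
    {
        return example_init() ? 1 : 0;
    }
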
*/ + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); + vc_init(vc, currcons || !vc->vc_sw->con_save_screen); +@@ -3642,7 +3642,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last, + old_was_color = vc->vc_can_do_color; + vc->vc_sw->con_deinit(vc); + vc->vc_origin = (unsigned long)vc->vc_screenbuf; +- visual_init(vc, i, 0); ++ visual_init(vc, i, false); + set_origin(vc); + update_attr(vc); + +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index 9dabc03675b00a..412931cf240f64 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -1284,6 +1284,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) + * make sure that there are no outstanding requests when + * clock scaling is in progress + */ ++ mutex_lock(&hba->host->scan_mutex); + blk_mq_quiesce_tagset(&hba->host->tag_set); + mutex_lock(&hba->wb_mutex); + down_write(&hba->clk_scaling_lock); +@@ -1294,6 +1295,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) + up_write(&hba->clk_scaling_lock); + mutex_unlock(&hba->wb_mutex); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ++ mutex_unlock(&hba->host->scan_mutex); + goto out; + } + +@@ -1315,6 +1317,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc + mutex_unlock(&hba->wb_mutex); + + blk_mq_unquiesce_tagset(&hba->host->tag_set); ++ mutex_unlock(&hba->host->scan_mutex); + ufshcd_release(hba); + } + +diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c +index 2804a4f749750a..17c1f85f2b7ba2 100644 +--- a/drivers/uio/uio_hv_generic.c ++++ b/drivers/uio/uio_hv_generic.c +@@ -249,6 +249,7 @@ hv_uio_probe(struct hv_device *dev, + struct hv_uio_private_data *pdata; + void *ring_buffer; + int ret; ++ size_t ring_size = hv_dev_ring_size(channel); + + /* Communicating with host has to be via shared memory not hypercall */ + if (!channel->offermsg.monitor_allocated) { +@@ -256,12 +257,17 @@ hv_uio_probe(struct hv_device *dev, + return -ENOTSUPP; + } + ++ if (!ring_size) ++ ring_size = HV_RING_SIZE * PAGE_SIZE; ++ ++ /* Adjust ring size if necessary to have it page aligned */ ++ ring_size = VMBUS_RING_SIZE(ring_size); ++ + pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + +- ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE, +- HV_RING_SIZE * PAGE_SIZE); ++ ret = vmbus_alloc_ring(channel, ring_size, ring_size); + if (ret) + return ret; + +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c +index 559c121f092300..600ad9412c146b 100644 +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -92,7 +92,6 @@ struct wdm_device { + u16 wMaxCommand; + u16 wMaxPacketSize; + __le16 inum; +- int reslength; + int length; + int read; + int count; +@@ -214,6 +213,11 @@ static void wdm_in_callback(struct urb *urb) + if (desc->rerr == 0 && status != -EPIPE) + desc->rerr = status; + ++ if (length == 0) { ++ dev_dbg(&desc->intf->dev, "received ZLP\n"); ++ goto skip_zlp; ++ } ++ + if (length + desc->length > desc->wMaxCommand) { + /* The buffer would overflow */ + set_bit(WDM_OVERFLOW, &desc->flags); +@@ -222,18 +226,18 @@ static void wdm_in_callback(struct urb *urb) + if (!test_bit(WDM_OVERFLOW, &desc->flags)) { + memmove(desc->ubuf + desc->length, desc->inbuf, length); + desc->length += length; +- desc->reslength = length; + } + } + skip_error: + + if (desc->rerr) { + /* +- * Since there was an error, userspace may decide to not read 
+- * any data after poll'ing. ++ * If there was a ZLP or an error, userspace may decide to not ++ * read any data after poll'ing. + * We should respond to further attempts from the device to send + * data, so that we can get unstuck. + */ ++skip_zlp: + schedule_work(&desc->service_outs_intr); + } else { + set_bit(WDM_READ, &desc->flags); +@@ -585,15 +589,6 @@ static ssize_t wdm_read + goto retry; + } + +- if (!desc->reslength) { /* zero length read */ +- dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n"); +- clear_bit(WDM_READ, &desc->flags); +- rv = service_outstanding_interrupt(desc); +- spin_unlock_irq(&desc->iuspin); +- if (rv < 0) +- goto err; +- goto retry; +- } + cntr = desc->length; + spin_unlock_irq(&desc->iuspin); + } +@@ -1016,7 +1011,7 @@ static void service_interrupt_work(struct work_struct *work) + + spin_lock_irq(&desc->iuspin); + service_outstanding_interrupt(desc); +- if (!desc->resp_count) { ++ if (!desc->resp_count && (desc->length || desc->rerr)) { + set_bit(WDM_READ, &desc->flags); + wake_up(&desc->wait); + } +diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c +index 501e8bc9738eba..1096a884c8d705 100644 +--- a/drivers/usb/common/usb-conn-gpio.c ++++ b/drivers/usb/common/usb-conn-gpio.c +@@ -20,6 +20,9 @@ + #include + #include + #include ++#include ++ ++static DEFINE_IDA(usb_conn_ida); + + #define USB_GPIO_DEB_MS 20 /* ms */ + #define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */ +@@ -29,6 +32,7 @@ + + struct usb_conn_info { + struct device *dev; ++ int conn_id; /* store the IDA-allocated ID */ + struct usb_role_switch *role_sw; + enum usb_role last_role; + struct regulator *vbus; +@@ -160,7 +164,17 @@ static int usb_conn_psy_register(struct usb_conn_info *info) + .of_node = dev->of_node, + }; + +- desc->name = "usb-charger"; ++ info->conn_id = ida_alloc(&usb_conn_ida, GFP_KERNEL); ++ if (info->conn_id < 0) ++ return info->conn_id; ++ ++ desc->name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d", ++ info->conn_id); ++ if (!desc->name) { ++ ida_free(&usb_conn_ida, info->conn_id); ++ return -ENOMEM; ++ } ++ + desc->properties = usb_charger_properties; + desc->num_properties = ARRAY_SIZE(usb_charger_properties); + desc->get_property = usb_charger_get_property; +@@ -168,8 +182,10 @@ static int usb_conn_psy_register(struct usb_conn_info *info) + cfg.drv_data = info; + + info->charger = devm_power_supply_register(dev, desc, &cfg); +- if (IS_ERR(info->charger)) +- dev_err(dev, "Unable to register charger\n"); ++ if (IS_ERR(info->charger)) { ++ dev_err(dev, "Unable to register charger %d\n", info->conn_id); ++ ida_free(&usb_conn_ida, info->conn_id); ++ } + + return PTR_ERR_OR_ZERO(info->charger); + } +@@ -277,6 +293,9 @@ static void usb_conn_remove(struct platform_device *pdev) + + cancel_delayed_work_sync(&info->dw_det); + ++ if (info->charger) ++ ida_free(&usb_conn_ida, info->conn_id); ++ + if (info->last_role == USB_ROLE_HOST && info->vbus) + regulator_disable(info->vbus); + +diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c +index 2a938cf47ccd62..da6d5e5f79e7a5 100644 +--- a/drivers/usb/core/usb.c ++++ b/drivers/usb/core/usb.c +@@ -695,15 +695,16 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, + device_set_of_node_from_dev(&dev->dev, bus->sysdev); + dev_set_name(&dev->dev, "usb%d", bus->busnum); + } else { ++ int n; ++ + /* match any labeling on the hubs; it's one-based */ + if (parent->devpath[0] == '0') { +- snprintf(dev->devpath, sizeof dev->devpath, +- "%d", port1); ++ n = 
snprintf(dev->devpath, sizeof(dev->devpath), "%d", port1); + /* Root ports are not counted in route string */ + dev->route = 0; + } else { +- snprintf(dev->devpath, sizeof dev->devpath, +- "%s.%d", parent->devpath, port1); ++ n = snprintf(dev->devpath, sizeof(dev->devpath), "%s.%d", ++ parent->devpath, port1); + /* Route string assumes hubs have less than 16 ports */ + if (port1 < 15) + dev->route = parent->route + +@@ -712,6 +713,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, + dev->route = parent->route + + (15 << ((parent->level - 1)*4)); + } ++ if (n >= sizeof(dev->devpath)) { ++ usb_put_hcd(bus_to_hcd(bus)); ++ usb_put_dev(dev); ++ return NULL; ++ } + + dev->dev.parent = &parent->dev; + dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index ce20c06a902531..c0db3c52831a2d 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -4601,6 +4601,12 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) + if (!hsotg) + return -ENODEV; + ++ /* Exit clock gating when driver is stopped. */ ++ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && ++ hsotg->bus_suspended && !hsotg->params.no_clock_gating) { ++ dwc2_gadget_exit_clock_gating(hsotg, 0); ++ } ++ + /* all endpoints should be shutdown */ + for (ep = 1; ep < hsotg->num_of_eps; ep++) { + if (hsotg->eps_in[ep]) +diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c +index a7cd0a06879e61..5d0d8949539539 100644 +--- a/drivers/usb/gadget/function/f_tcm.c ++++ b/drivers/usb/gadget/function/f_tcm.c +@@ -1297,14 +1297,14 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn, + struct usbg_tport *tport = container_of(wwn, struct usbg_tport, + tport_wwn); + struct usbg_tpg *tpg; +- unsigned long tpgt; ++ u16 tpgt; + int ret; + struct f_tcm_opts *opts; + unsigned i; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); +- if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX) ++ if (kstrtou16(name + 5, 0, &tpgt)) + return ERR_PTR(-EINVAL); + ret = -ENODEV; + mutex_lock(&tpg_instances_lock); +diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c +index ccff838ab89e12..5f6fc5b79212ef 100644 +--- a/drivers/usb/typec/altmodes/displayport.c ++++ b/drivers/usb/typec/altmodes/displayport.c +@@ -323,6 +323,10 @@ static int dp_altmode_vdm(struct typec_altmode *alt, + break; + case CMDT_RSP_NAK: + switch (cmd) { ++ case DP_CMD_STATUS_UPDATE: ++ if (typec_altmode_exit(alt)) ++ dev_err(&dp->alt->dev, "Exit Mode Failed!\n"); ++ break; + case DP_CMD_CONFIGURE: + dp->data.conf = 0; + ret = dp_altmode_configured(dp); +diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c +index 80dd91938d9606..a5b2a6f9c57950 100644 +--- a/drivers/usb/typec/mux.c ++++ b/drivers/usb/typec/mux.c +@@ -214,7 +214,7 @@ int typec_switch_set(struct typec_switch *sw, + sw_dev = sw->sw_devs[i]; + + ret = sw_dev->set(sw_dev, orientation); +- if (ret) ++ if (ret && ret != -EOPNOTSUPP) + return ret; + } + +@@ -378,7 +378,7 @@ int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state) + mux_dev = mux->mux_devs[i]; + + ret = mux_dev->set(mux_dev, state); +- if (ret) ++ if (ret && ret != -EOPNOTSUPP) + return ret; + } + +diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c +index f1711b2f9ff057..d99e1b3e4e5c1a 100644 +--- a/drivers/video/console/dummycon.c ++++ b/drivers/video/console/dummycon.c +@@ -82,6 +82,15 @@ 
static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch) + /* Redraw, so that we get putc(s) for output done while blanked */ + return 1; + } ++ ++static bool dummycon_switch(struct vc_data *vc) ++{ ++ /* ++ * Redraw, so that we get putc(s) for output done while switched ++ * away. Informs deferred consoles to take over the display. ++ */ ++ return true; ++} + #else + static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } + static void dummycon_putcs(struct vc_data *vc, const unsigned short *s, +@@ -90,6 +99,10 @@ static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch) + { + return 0; + } ++static bool dummycon_switch(struct vc_data *vc) ++{ ++ return false; ++} + #endif + + static const char *dummycon_startup(void) +@@ -97,7 +110,7 @@ static const char *dummycon_startup(void) + return "dummy device"; + } + +-static void dummycon_init(struct vc_data *vc, int init) ++static void dummycon_init(struct vc_data *vc, bool init) + { + vc->vc_can_do_color = 1; + if (init) { +@@ -108,8 +121,8 @@ static void dummycon_init(struct vc_data *vc, int init) + } + + static void dummycon_deinit(struct vc_data *vc) { } +-static void dummycon_clear(struct vc_data *vc, int sy, int sx, int height, +- int width) { } ++static void dummycon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, ++ unsigned int width) { } + static void dummycon_cursor(struct vc_data *vc, int mode) { } + + static bool dummycon_scroll(struct vc_data *vc, unsigned int top, +@@ -119,11 +132,6 @@ static bool dummycon_scroll(struct vc_data *vc, unsigned int top, + return false; + } + +-static int dummycon_switch(struct vc_data *vc) +-{ +- return 0; +-} +- + /* + * The console `switch' structure for the dummy console + * +diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c +index ef29b321967f07..26b41a8f36c870 100644 +--- a/drivers/video/console/mdacon.c ++++ b/drivers/video/console/mdacon.c +@@ -352,7 +352,7 @@ static const char *mdacon_startup(void) + return "MDA-2"; + } + +-static void mdacon_init(struct vc_data *c, int init) ++static void mdacon_init(struct vc_data *c, bool init) + { + c->vc_complement_mask = 0x0800; /* reverse video */ + c->vc_display_fg = &mda_display_fg; +@@ -442,26 +442,21 @@ static void mdacon_putcs(struct vc_data *c, const unsigned short *s, + } + } + +-static void mdacon_clear(struct vc_data *c, int y, int x, +- int height, int width) ++static void mdacon_clear(struct vc_data *c, unsigned int y, unsigned int x, ++ unsigned int width) + { + u16 *dest = mda_addr(x, y); + u16 eattr = mda_convert_attr(c->vc_video_erase_char); + +- if (width <= 0 || height <= 0) ++ if (width <= 0) + return; + +- if (x==0 && width==mda_num_columns) { +- scr_memsetw(dest, eattr, height*width*2); +- } else { +- for (; height > 0; height--, dest+=mda_num_columns) +- scr_memsetw(dest, eattr, width*2); +- } ++ scr_memsetw(dest, eattr, width * 2); + } +- +-static int mdacon_switch(struct vc_data *c) ++ ++static bool mdacon_switch(struct vc_data *c) + { +- return 1; /* redrawing needed */ ++ return true; /* redrawing needed */ + } + + static int mdacon_blank(struct vc_data *c, int blank, int mode_switch) +diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c +index e8e4f82cd4a1b8..63d96c4bbdccd3 100644 +--- a/drivers/video/console/newport_con.c ++++ b/drivers/video/console/newport_con.c +@@ -324,7 +324,7 @@ static const char *newport_startup(void) + return NULL; + } + +-static void newport_init(struct vc_data *vc, int 
init) ++static void newport_init(struct vc_data *vc, bool init) + { + int cols, rows; + +@@ -346,12 +346,12 @@ static void newport_deinit(struct vc_data *c) + } + } + +-static void newport_clear(struct vc_data *vc, int sy, int sx, int height, +- int width) ++static void newport_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, ++ unsigned int width) + { + int xend = ((sx + width) << 3) - 1; + int ystart = ((sy << 4) + topscan) & 0x3ff; +- int yend = (((sy + height) << 4) + topscan - 1) & 0x3ff; ++ int yend = (((sy + 1) << 4) + topscan - 1) & 0x3ff; + + if (logo_active) + return; +@@ -462,7 +462,7 @@ static void newport_cursor(struct vc_data *vc, int mode) + } + } + +-static int newport_switch(struct vc_data *vc) ++static bool newport_switch(struct vc_data *vc) + { + static int logo_drawn = 0; + +@@ -476,7 +476,7 @@ static int newport_switch(struct vc_data *vc) + } + } + +- return 1; ++ return true; + } + + static int newport_blank(struct vc_data *c, int blank, int mode_switch) +diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c +index 992a4fa431aaa9..87900600eff11f 100644 +--- a/drivers/video/console/sticon.c ++++ b/drivers/video/console/sticon.c +@@ -273,7 +273,7 @@ static int sticon_font_set(struct vc_data *vc, struct console_font *font, + return sticon_set_font(vc, font, vpitch); + } + +-static void sticon_init(struct vc_data *c, int init) ++static void sticon_init(struct vc_data *c, bool init) + { + struct sti_struct *sti = sticon_sti; + int vc_cols, vc_rows; +@@ -300,19 +300,19 @@ static void sticon_deinit(struct vc_data *c) + sticon_set_def_font(i); + } + +-static void sticon_clear(struct vc_data *conp, int sy, int sx, int height, +- int width) ++static void sticon_clear(struct vc_data *conp, unsigned int sy, unsigned int sx, ++ unsigned int width) + { +- if (!height || !width) ++ if (!width) + return; + +- sti_clear(sticon_sti, sy, sx, height, width, ++ sti_clear(sticon_sti, sy, sx, 1, width, + conp->vc_video_erase_char, font_data[conp->vc_num]); + } + +-static int sticon_switch(struct vc_data *conp) ++static bool sticon_switch(struct vc_data *conp) + { +- return 1; /* needs refreshing */ ++ return true; /* needs refreshing */ + } + + static int sticon_blank(struct vc_data *c, int blank, int mode_switch) +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index c9ec89649b0552..bbc362db40c586 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -332,7 +332,7 @@ static const char *vgacon_startup(void) + return display_desc; + } + +-static void vgacon_init(struct vc_data *c, int init) ++static void vgacon_init(struct vc_data *c, bool init) + { + struct uni_pagedict *p; + +@@ -349,7 +349,7 @@ static void vgacon_init(struct vc_data *c, int init) + c->vc_scan_lines = vga_scan_lines; + c->vc_font.height = c->vc_cell_height = vga_video_font_height; + +- /* set dimensions manually if init != 0 since vc_resize() will fail */ ++ /* set dimensions manually if init is true since vc_resize() will fail */ + if (init) { + c->vc_cols = vga_video_num_columns; + c->vc_rows = vga_video_num_lines; +@@ -585,7 +585,7 @@ static void vgacon_doresize(struct vc_data *c, + raw_spin_unlock_irqrestore(&vga_lock, flags); + } + +-static int vgacon_switch(struct vc_data *c) ++static bool vgacon_switch(struct vc_data *c) + { + int x = c->vc_cols * VGA_FONTWIDTH; + int y = c->vc_rows * c->vc_cell_height; +@@ -614,7 +614,7 @@ static int vgacon_switch(struct vc_data *c) + vgacon_doresize(c, c->vc_cols, c->vc_rows); + } + +- 
return 0; /* Redrawing not needed */ ++ return false; /* Redrawing not needed */ + } + + static void vga_set_palette(struct vc_data *vc, const unsigned char *table) +@@ -1156,8 +1156,8 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, + * The console `switch' structure for the VGA based console + */ + +-static void vgacon_clear(struct vc_data *vc, int sy, int sx, int height, +- int width) { } ++static void vgacon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, ++ unsigned int width) { } + static void vgacon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } + static void vgacon_putcs(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos) { } +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index 75996ef9992e41..9d095fe03e18ba 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -993,7 +993,7 @@ static const char *fbcon_startup(void) + return display_desc; + } + +-static void fbcon_init(struct vc_data *vc, int init) ++static void fbcon_init(struct vc_data *vc, bool init) + { + struct fb_info *info; + struct fbcon_ops *ops; +@@ -1240,8 +1240,8 @@ static void fbcon_deinit(struct vc_data *vc) + * restriction is simplicity & efficiency at the moment. + */ + +-static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, +- int width) ++static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, ++ unsigned int height, unsigned int width) + { + struct fb_info *info = fbcon_info_from_console(vc->vc_num); + struct fbcon_ops *ops = info->fbcon_par; +@@ -1280,6 +1280,12 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, + ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); + } + ++static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, ++ unsigned int width) ++{ ++ __fbcon_clear(vc, sy, sx, 1, width); ++} ++ + static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos) + { +@@ -1767,7 +1773,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, t, b - t - count, + count); +- fbcon_clear(vc, b - count, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), +@@ -1790,7 +1796,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + b - t - count, vc->vc_cols); + else + goto redraw_up; +- fbcon_clear(vc, b - count, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: +@@ -1808,7 +1814,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + vc->vc_rows - b, b); + } else + fbcon_redraw_move(vc, p, t + count, b - t - count, t); +- fbcon_clear(vc, b - count, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: +@@ -1831,14 +1837,14 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + b - t - count, vc->vc_cols); + else + goto redraw_up; +- fbcon_clear(vc, b - count, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_up: + fbcon_redraw(vc, t, b - t - count, + count * vc->vc_cols); +- fbcon_clear(vc, b - count, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, b - count, 0, count, 
vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), +@@ -1855,7 +1861,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, + -count); +- fbcon_clear(vc, t, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), +@@ -1878,7 +1884,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + b - t - count, vc->vc_cols); + else + goto redraw_down; +- fbcon_clear(vc, t, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: +@@ -1900,7 +1906,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + b - t - count, vc->vc_cols); + else + goto redraw_down; +- fbcon_clear(vc, t, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: +@@ -1917,14 +1923,14 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + fbcon_redraw_move(vc, p, count, t, 0); + } else + fbcon_redraw_move(vc, p, t, b - t - count, t + count); +- fbcon_clear(vc, t, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_down: + fbcon_redraw(vc, b - 1, b - t - count, + -count * vc->vc_cols); +- fbcon_clear(vc, t, 0, count, vc->vc_cols); ++ __fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), +@@ -2066,7 +2072,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, + return 0; + } + +-static int fbcon_switch(struct vc_data *vc) ++static bool fbcon_switch(struct vc_data *vc) + { + struct fb_info *info, *old_info = NULL; + struct fbcon_ops *ops; +@@ -2188,9 +2194,9 @@ static int fbcon_switch(struct vc_data *vc) + vc->vc_origin + vc->vc_size_row * vc->vc_top, + vc->vc_size_row * (vc->vc_bottom - + vc->vc_top) / 2); +- return 0; ++ return false; + } +- return 1; ++ return true; + } + + static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info, +@@ -2203,7 +2209,7 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info, + + oldc = vc->vc_video_erase_char; + vc->vc_video_erase_char &= charmask; +- fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols); ++ __fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols); + vc->vc_video_erase_char = oldc; + } + } +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 34a30d61b470c3..bb5f7911d473cb 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2148,8 +2148,7 @@ static int load_global_roots_objectid(struct btrfs_root *tree_root, + found = true; + root = read_tree_root_path(tree_root, path, &key); + if (IS_ERR(root)) { +- if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) +- ret = PTR_ERR(root); ++ ret = PTR_ERR(root); + break; + } + set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 48d2579236729d..af1f22b3cff7dc 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -8754,6 +8754,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + int ret; + int ret2; + bool need_abort = false; ++ bool logs_pinned = false; + struct fscrypt_name old_fname, new_fname; + struct fscrypt_str *old_name, *new_name; + +@@ -8877,6 +8878,31 @@ static int btrfs_rename_exchange(struct inode *old_dir, + inode_inc_iversion(new_inode); + simple_rename_timestamp(old_dir, 
old_dentry, new_dir, new_dentry); + ++ if (old_ino != BTRFS_FIRST_FREE_OBJECTID && ++ new_ino != BTRFS_FIRST_FREE_OBJECTID) { ++ /* ++ * If we are renaming in the same directory (and it's not for ++ * root entries) pin the log early to prevent any concurrent ++ * task from logging the directory after we removed the old ++ * entries and before we add the new entries, otherwise that ++ * task can sync a log without any entry for the inodes we are ++ * renaming and therefore replaying that log, if a power failure ++ * happens after syncing the log, would result in deleting the ++ * inodes. ++ * ++ * If the rename affects two different directories, we want to ++ * make sure the that there's no log commit that contains ++ * updates for only one of the directories but not for the ++ * other. ++ * ++ * If we are renaming an entry for a root, we don't care about ++ * log updates since we called btrfs_set_log_full_commit(). ++ */ ++ btrfs_pin_log_trans(root); ++ btrfs_pin_log_trans(dest); ++ logs_pinned = true; ++ } ++ + if (old_dentry->d_parent != new_dentry->d_parent) { + btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), + BTRFS_I(old_inode), true); +@@ -8934,30 +8960,23 @@ static int btrfs_rename_exchange(struct inode *old_dir, + BTRFS_I(new_inode)->dir_index = new_idx; + + /* +- * Now pin the logs of the roots. We do it to ensure that no other task +- * can sync the logs while we are in progress with the rename, because +- * that could result in an inconsistency in case any of the inodes that +- * are part of this rename operation were logged before. ++ * Do the log updates for all inodes. ++ * ++ * If either entry is for a root we don't need to update the logs since ++ * we've called btrfs_set_log_full_commit() before. + */ +- if (old_ino != BTRFS_FIRST_FREE_OBJECTID) +- btrfs_pin_log_trans(root); +- if (new_ino != BTRFS_FIRST_FREE_OBJECTID) +- btrfs_pin_log_trans(dest); +- +- /* Do the log updates for all inodes. */ +- if (old_ino != BTRFS_FIRST_FREE_OBJECTID) ++ if (logs_pinned) { + btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), + old_rename_ctx.index, new_dentry->d_parent); +- if (new_ino != BTRFS_FIRST_FREE_OBJECTID) + btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir), + new_rename_ctx.index, old_dentry->d_parent); ++ } + +- /* Now unpin the logs. */ +- if (old_ino != BTRFS_FIRST_FREE_OBJECTID) ++out_fail: ++ if (logs_pinned) { + btrfs_end_log_trans(root); +- if (new_ino != BTRFS_FIRST_FREE_OBJECTID) + btrfs_end_log_trans(dest); +-out_fail: ++ } + ret2 = btrfs_end_transaction(trans); + ret = ret ? ret : ret2; + out_notrans: +@@ -9007,6 +9026,7 @@ static int btrfs_rename(struct mnt_idmap *idmap, + int ret2; + u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); + struct fscrypt_name old_fname, new_fname; ++ bool logs_pinned = false; + + if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + return -EPERM; +@@ -9141,6 +9161,29 @@ static int btrfs_rename(struct mnt_idmap *idmap, + inode_inc_iversion(old_inode); + simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); + ++ if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { ++ /* ++ * If we are renaming in the same directory (and it's not a ++ * root entry) pin the log to prevent any concurrent task from ++ * logging the directory after we removed the old entry and ++ * before we add the new entry, otherwise that task can sync ++ * a log without any entry for the inode we are renaming and ++ * therefore replaying that log, if a power failure happens ++ * after syncing the log, would result in deleting the inode. 
++ * ++ * If the rename affects two different directories, we want to ++ * make sure the that there's no log commit that contains ++ * updates for only one of the directories but not for the ++ * other. ++ * ++ * If we are renaming an entry for a root, we don't care about ++ * log updates since we called btrfs_set_log_full_commit(). ++ */ ++ btrfs_pin_log_trans(root); ++ btrfs_pin_log_trans(dest); ++ logs_pinned = true; ++ } ++ + if (old_dentry->d_parent != new_dentry->d_parent) + btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), + BTRFS_I(old_inode), true); +@@ -9189,7 +9232,7 @@ static int btrfs_rename(struct mnt_idmap *idmap, + if (old_inode->i_nlink == 1) + BTRFS_I(old_inode)->dir_index = index; + +- if (old_ino != BTRFS_FIRST_FREE_OBJECTID) ++ if (logs_pinned) + btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir), + rename_ctx.index, new_dentry->d_parent); + +@@ -9205,6 +9248,10 @@ static int btrfs_rename(struct mnt_idmap *idmap, + } + } + out_fail: ++ if (logs_pinned) { ++ btrfs_end_log_trans(root); ++ btrfs_end_log_trans(dest); ++ } + ret2 = btrfs_end_transaction(trans); + ret = ret ? ret : ret2; + out_notrans: +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index b9a0b26d08e1c4..1eb543602ff12f 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -3174,6 +3174,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) + device->bytes_used - dev_extent_len); + atomic64_add(dev_extent_len, &fs_info->free_chunk_space); + btrfs_clear_space_info_full(fs_info); ++ ++ if (list_empty(&device->post_commit_list)) { ++ list_add_tail(&device->post_commit_list, ++ &trans->transaction->dev_update_list); ++ } ++ + mutex_unlock(&fs_info->chunk_mutex); + } + } +diff --git a/fs/ceph/file.c b/fs/ceph/file.c +index a03b11cf788721..e12657b4c3e042 100644 +--- a/fs/ceph/file.c ++++ b/fs/ceph/file.c +@@ -2513,7 +2513,7 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length) + s32 stripe_unit = ci->i_layout.stripe_unit; + s32 stripe_count = ci->i_layout.stripe_count; + s32 object_size = ci->i_layout.object_size; +- u64 object_set_size = object_size * stripe_count; ++ u64 object_set_size = (u64) object_size * stripe_count; + u64 nearly, t; + + /* round offset up to next period boundary */ +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 702137eafaa675..b9913ab526fd1a 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -1774,26 +1774,32 @@ static int f2fs_statfs_project(struct super_block *sb, + + limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, + dquot->dq_dqb.dqb_bhardlimit); +- if (limit) +- limit >>= sb->s_blocksize_bits; ++ limit >>= sb->s_blocksize_bits; ++ ++ if (limit) { ++ uint64_t remaining = 0; + +- if (limit && buf->f_blocks > limit) { + curblock = (dquot->dq_dqb.dqb_curspace + + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; +- buf->f_blocks = limit; +- buf->f_bfree = buf->f_bavail = +- (buf->f_blocks > curblock) ? +- (buf->f_blocks - curblock) : 0; ++ if (limit > curblock) ++ remaining = limit - curblock; ++ ++ buf->f_blocks = min(buf->f_blocks, limit); ++ buf->f_bfree = min(buf->f_bfree, remaining); ++ buf->f_bavail = min(buf->f_bavail, remaining); + } + + limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, + dquot->dq_dqb.dqb_ihardlimit); + +- if (limit && buf->f_files > limit) { +- buf->f_files = limit; +- buf->f_ffree = +- (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? 
+- (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; ++ if (limit) { ++ uint64_t remaining = 0; ++ ++ if (limit > dquot->dq_dqb.dqb_curinodes) ++ remaining = limit - dquot->dq_dqb.dqb_curinodes; ++ ++ buf->f_files = min(buf->f_files, limit); ++ buf->f_ffree = min(buf->f_ffree, remaining); + } + + spin_unlock(&dquot->dq_dqb_lock); +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index 82951a535d2d4d..0b84284ece98fa 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1860,6 +1860,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, + int err; + bool trust_local_cmtime = is_wb; + bool fault_blocked = false; ++ u64 attr_version; + + if (!fc->default_permissions) + attr->ia_valid |= ATTR_FORCE; +@@ -1944,6 +1945,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, + if (fc->handle_killpriv_v2 && !capable(CAP_FSETID)) + inarg.valid |= FATTR_KILL_SUIDGID; + } ++ ++ attr_version = fuse_get_attr_version(fm->fc); + fuse_setattr_fill(fc, &args, inode, &inarg, &outarg); + err = fuse_simple_request(fm, &args); + if (err) { +@@ -1969,6 +1972,14 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, + /* FIXME: clear I_DIRTY_SYNC? */ + } + ++ if (fi->attr_version > attr_version) { ++ /* ++ * Apply attributes, for example for fsnotify_change(), but set ++ * attribute timeout to zero. ++ */ ++ outarg.attr_valid = outarg.attr_valid_nsec = 0; ++ } ++ + fuse_change_attributes_common(inode, &outarg.attr, NULL, + ATTR_TIMEOUT(&outarg), + fuse_get_cache_mask(inode)); +diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c +index 0e1019382cf519..35e063c9f3a42e 100644 +--- a/fs/jfs/jfs_dmap.c ++++ b/fs/jfs/jfs_dmap.c +@@ -178,45 +178,30 @@ int dbMount(struct inode *ipbmap) + dbmp_le = (struct dbmap_disk *) mp->data; + bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize); + bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree); +- + bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); +- if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE || +- bmp->db_l2nbperpage < 0) { +- err = -EINVAL; +- goto err_release_metapage; +- } +- + bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); +- if (!bmp->db_numag || bmp->db_numag > MAXAG) { +- err = -EINVAL; +- goto err_release_metapage; +- } +- + bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); + bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); + bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); +- if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 || +- bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) { +- err = -EINVAL; +- goto err_release_metapage; +- } +- + bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); + bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight); + bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); +- if (!bmp->db_agwidth) { +- err = -EINVAL; +- goto err_release_metapage; +- } + bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); + bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); +- if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG || +- bmp->db_agl2size < 0) { +- err = -EINVAL; +- goto err_release_metapage; +- } + +- if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) { ++ if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) || ++ (bmp->db_l2nbperpage < 0) || ++ !bmp->db_numag || (bmp->db_numag > MAXAG) || ++ (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) || ++ (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) || ++ (bmp->db_agheight < 0) || (bmp->db_agheight > (L2LPERCTL >> 1)) || ++ (bmp->db_agwidth < 1) || (bmp->db_agwidth > (LPERCTL / MAXAG)) || ++ (bmp->db_agwidth > (1 << (L2LPERCTL - (bmp->db_agheight << 1)))) || ++ 
(bmp->db_agstart < 0) || ++ (bmp->db_agstart > (CTLTREESIZE - 1 - bmp->db_agwidth * (MAXAG - 1))) || ++ (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) || ++ (bmp->db_agl2size < 0) || ++ ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) { + err = -EINVAL; + goto err_release_metapage; + } +diff --git a/fs/namespace.c b/fs/namespace.c +index eab9185e228584..cebcb9fa2acc07 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -2364,14 +2364,14 @@ static int attach_recursive_mnt(struct mount *source_mnt, + hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { + struct mount *q; + hlist_del_init(&child->mnt_hash); +- q = __lookup_mnt(&child->mnt_parent->mnt, +- child->mnt_mountpoint); +- if (q) +- mnt_change_mountpoint(child, smp, q); + /* Notice when we are propagating across user namespaces */ + if (child->mnt_parent->mnt_ns->user_ns != user_ns) + lock_mnt_tree(child); + child->mnt.mnt_flags &= ~MNT_LOCKED; ++ q = __lookup_mnt(&child->mnt_parent->mnt, ++ child->mnt_mountpoint); ++ if (q) ++ mnt_change_mountpoint(child, smp, q); + commit_tree(child); + } + put_mountpoint(smp); +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 06230baaa554e7..419d98cf9e29f1 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -555,6 +555,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) + set_nlink(inode, fattr->nlink); + else if (fattr_supported & NFS_ATTR_FATTR_NLINK) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK); ++ else ++ set_nlink(inode, 1); + if (fattr->valid & NFS_ATTR_FATTR_OWNER) + inode->i_uid = fattr->uid; + else if (fattr_supported & NFS_ATTR_FATTR_OWNER) +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 1b94a55215e7de..3085a2faab2d34 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -6059,6 +6059,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, + struct nfs_server *server = NFS_SERVER(inode); + int ret; + ++ if (unlikely(NFS_FH(inode)->size == 0)) ++ return -ENODATA; + if (!nfs4_server_supports_acls(server, type)) + return -EOPNOTSUPP; + ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); +@@ -6133,6 +6135,9 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, + { + struct nfs4_exception exception = { }; + int err; ++ ++ if (unlikely(NFS_FH(inode)->size == 0)) ++ return -ENODATA; + do { + err = __nfs4_proc_set_acl(inode, buf, buflen, type); + trace_nfs4_set_acl(inode, err); +@@ -10625,7 +10630,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { + + static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) + { +- ssize_t error, error2, error3; ++ ssize_t error, error2, error3, error4; + size_t left = size; + + error = generic_listxattr(dentry, list, left); +@@ -10648,8 +10653,16 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) + error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); + if (error3 < 0) + return error3; ++ if (list) { ++ list += error3; ++ left -= error3; ++ } ++ ++ error4 = security_inode_listsecurity(d_inode(dentry), list, left); ++ if (error4 < 0) ++ return error4; + +- error += error2 + error3; ++ error += error2 + error3 + error4; + if (size && error > size) + return -ERANGE; + return error; +diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c +index 0bf3ffcd072f6a..0da1cd01d01cf7 100644 +--- a/fs/overlayfs/util.c ++++ b/fs/overlayfs/util.c +@@ -274,7 +274,9 @@ enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path) + + struct dentry 
*ovl_dentry_upper(struct dentry *dentry) + { +- return ovl_upperdentry_dereference(OVL_I(d_inode(dentry))); ++ struct inode *inode = d_inode(dentry); ++ ++ return inode ? ovl_upperdentry_dereference(OVL_I(inode)) : NULL; + } + + struct dentry *ovl_dentry_lower(struct dentry *dentry) +diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h +index 6a4ed99e162c5d..d776340ad91ce6 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -677,6 +677,7 @@ inc_rfc1001_len(void *buf, int count) + struct TCP_Server_Info { + struct list_head tcp_ses_list; + struct list_head smb_ses_list; ++ struct list_head rlist; /* reconnect list */ + spinlock_t srv_lock; /* protect anything here that is not protected */ + __u64 conn_id; /* connection identifier (useful for debugging) */ + int srv_count; /* reference counter */ +@@ -739,6 +740,7 @@ struct TCP_Server_Info { + char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; + __u32 sequence_number; /* for signing, protected by srv_mutex */ + __u32 reconnect_instance; /* incremented on each reconnect */ ++ __le32 session_key_id; /* retrieved from negotiate response and send in session setup request */ + struct session_key session_key; + unsigned long lstrp; /* when we got last response from this server */ + struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */ +diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h +index ca33f6cd6a8004..763178b7745424 100644 +--- a/fs/smb/client/cifspdu.h ++++ b/fs/smb/client/cifspdu.h +@@ -557,7 +557,7 @@ typedef union smb_com_session_setup_andx { + __le16 MaxBufferSize; + __le16 MaxMpxCount; + __le16 VcNumber; +- __u32 SessionKey; ++ __le32 SessionKey; + __le16 SecurityBlobLength; + __u32 Reserved; + __le32 Capabilities; /* see below */ +@@ -576,7 +576,7 @@ typedef union smb_com_session_setup_andx { + __le16 MaxBufferSize; + __le16 MaxMpxCount; + __le16 VcNumber; +- __u32 SessionKey; ++ __le32 SessionKey; + __le16 CaseInsensitivePasswordLength; /* ASCII password len */ + __le16 CaseSensitivePasswordLength; /* Unicode password length*/ + __u32 Reserved; /* see below */ +@@ -614,7 +614,7 @@ typedef union smb_com_session_setup_andx { + __le16 MaxBufferSize; + __le16 MaxMpxCount; + __le16 VcNumber; +- __u32 SessionKey; ++ __le32 SessionKey; + __le16 PasswordLength; + __u32 Reserved; /* encrypt key len and offset */ + __le16 ByteCount; +diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c +index c36ab20050c16d..db35e68e8a5830 100644 +--- a/fs/smb/client/cifssmb.c ++++ b/fs/smb/client/cifssmb.c +@@ -479,6 +479,7 @@ CIFSSMBNegotiate(const unsigned int xid, + server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); + cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf); + server->capabilities = le32_to_cpu(pSMBr->Capabilities); ++ server->session_key_id = pSMBr->SessionKey; + server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); + server->timeAdj *= 60; + +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index 8fa5fe0a8c5c59..454420aa02220f 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -140,6 +140,14 @@ static void smb2_query_server_interfaces(struct work_struct *work) + (SMB_INTERFACE_POLL_INTERVAL * HZ)); + } + ++#define set_need_reco(server) \ ++do { \ ++ spin_lock(&server->srv_lock); \ ++ if (server->tcpStatus != CifsExiting) \ ++ server->tcpStatus = CifsNeedReconnect; \ ++ spin_unlock(&server->srv_lock); \ ++} while (0) ++ + /* + * Update the tcpStatus for the server. 
+ * This is used to signal the cifsd thread to call cifs_reconnect +@@ -153,39 +161,45 @@ void + cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server, + bool all_channels) + { +- struct TCP_Server_Info *pserver; ++ struct TCP_Server_Info *nserver; + struct cifs_ses *ses; ++ LIST_HEAD(reco); + int i; + +- /* If server is a channel, select the primary channel */ +- pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; +- + /* if we need to signal just this channel */ + if (!all_channels) { +- spin_lock(&server->srv_lock); +- if (server->tcpStatus != CifsExiting) +- server->tcpStatus = CifsNeedReconnect; +- spin_unlock(&server->srv_lock); ++ set_need_reco(server); + return; + } + +- spin_lock(&cifs_tcp_ses_lock); +- list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { +- if (cifs_ses_exiting(ses)) +- continue; +- spin_lock(&ses->chan_lock); +- for (i = 0; i < ses->chan_count; i++) { +- if (!ses->chans[i].server) ++ if (SERVER_IS_CHAN(server)) ++ server = server->primary_server; ++ scoped_guard(spinlock, &cifs_tcp_ses_lock) { ++ set_need_reco(server); ++ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { ++ spin_lock(&ses->ses_lock); ++ if (ses->ses_status == SES_EXITING) { ++ spin_unlock(&ses->ses_lock); + continue; +- +- spin_lock(&ses->chans[i].server->srv_lock); +- if (ses->chans[i].server->tcpStatus != CifsExiting) +- ses->chans[i].server->tcpStatus = CifsNeedReconnect; +- spin_unlock(&ses->chans[i].server->srv_lock); ++ } ++ spin_lock(&ses->chan_lock); ++ for (i = 1; i < ses->chan_count; i++) { ++ nserver = ses->chans[i].server; ++ if (!nserver) ++ continue; ++ nserver->srv_count++; ++ list_add(&nserver->rlist, &reco); ++ } ++ spin_unlock(&ses->chan_lock); ++ spin_unlock(&ses->ses_lock); + } +- spin_unlock(&ses->chan_lock); + } +- spin_unlock(&cifs_tcp_ses_lock); ++ ++ list_for_each_entry_safe(server, nserver, &reco, rlist) { ++ list_del_init(&server->rlist); ++ set_need_reco(server); ++ cifs_put_tcp_session(server, 0); ++ } + } + + /* +diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c +index 65d4b72b4d51a9..9e8e0a01ae8eb0 100644 +--- a/fs/smb/client/misc.c ++++ b/fs/smb/client/misc.c +@@ -320,6 +320,14 @@ check_smb_hdr(struct smb_hdr *smb) + if (smb->Command == SMB_COM_LOCKING_ANDX) + return 0; + ++ /* ++ * Windows NT server returns error resposne (e.g. STATUS_DELETE_PENDING ++ * or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other) ++ * for some TRANS2 requests without the RESPONSE flag set in header. ++ */ ++ if (smb->Command == SMB_COM_TRANSACTION2 && smb->Status.CifsError != 0) ++ return 0; ++ + cifs_dbg(VFS, "Server sent request, not response. 
mid=%u\n", + get_mid(smb)); + return 1; +diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c +index 8959206a0353eb..c351da8c3e2eaf 100644 +--- a/fs/smb/client/sess.c ++++ b/fs/smb/client/sess.c +@@ -683,6 +683,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, + USHRT_MAX)); + pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq); + pSMB->req.VcNumber = cpu_to_le16(1); ++ pSMB->req.SessionKey = server->session_key_id; + + /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ + +@@ -1739,22 +1740,22 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data) + pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; + + capabilities = cifs_ssetup_hdr(ses, server, pSMB); +- if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { +- cifs_dbg(VFS, "NTLMSSP requires Unicode support\n"); +- return -ENOSYS; +- } +- + pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; + capabilities |= CAP_EXTENDED_SECURITY; + pSMB->req.Capabilities |= cpu_to_le32(capabilities); + + bcc_ptr = sess_data->iov[2].iov_base; +- /* unicode strings must be word aligned */ +- if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) { +- *bcc_ptr = 0; +- bcc_ptr++; ++ ++ if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) { ++ /* unicode strings must be word aligned */ ++ if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) { ++ *bcc_ptr = 0; ++ bcc_ptr++; ++ } ++ unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); ++ } else { ++ ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp); + } +- unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); + + sess_data->iov[2].iov_len = (long) bcc_ptr - + (long) sess_data->iov[2].iov_base; +diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h +index 4fdd76ce53b90c..dc07c6eb8c1921 100644 +--- a/fs/smb/server/connection.h ++++ b/fs/smb/server/connection.h +@@ -107,6 +107,7 @@ struct ksmbd_conn { + __le16 signing_algorithm; + bool binding; + atomic_t refcnt; ++ bool is_aapl; + }; + + struct ksmbd_conn_ops { +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index d8325504a1624b..6c22240368abf4 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -1345,8 +1345,7 @@ static int ntlm_negotiate(struct ksmbd_work *work, + return rc; + + sz = le16_to_cpu(rsp->SecurityBufferOffset); +- chgblob = +- (struct challenge_message *)((char *)&rsp->hdr.ProtocolId + sz); ++ chgblob = (struct challenge_message *)rsp->Buffer; + memset(chgblob, 0, sizeof(struct challenge_message)); + + if (!work->conn->use_spnego) { +@@ -1379,8 +1378,7 @@ static int ntlm_negotiate(struct ksmbd_work *work, + goto out; + } + +- sz = le16_to_cpu(rsp->SecurityBufferOffset); +- memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len); ++ memcpy(rsp->Buffer, spnego_blob, spnego_blob_len); + rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len); + + out: +@@ -1462,8 +1460,7 @@ static int ntlm_authenticate(struct ksmbd_work *work, + if (rc) + return -ENOMEM; + +- sz = le16_to_cpu(rsp->SecurityBufferOffset); +- memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len); ++ memcpy(rsp->Buffer, spnego_blob, spnego_blob_len); + rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len); + kfree(spnego_blob); + } +@@ -2862,7 +2859,7 @@ int smb2_open(struct ksmbd_work *work) + int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0; + int rc = 0; + int contxt_cnt = 0, query_disk_id = 0; +- int maximal_access_ctxt = 0, posix_ctxt = 0; ++ bool maximal_access_ctxt = false, posix_ctxt = false; + int s_type = 0; + 
int next_off = 0; + char *name = NULL; +@@ -2889,6 +2886,27 @@ int smb2_open(struct ksmbd_work *work) + return create_smb2_pipe(work); + } + ++ if (req->CreateContextsOffset && tcon->posix_extensions) { ++ context = smb2_find_context_vals(req, SMB2_CREATE_TAG_POSIX, 16); ++ if (IS_ERR(context)) { ++ rc = PTR_ERR(context); ++ goto err_out2; ++ } else if (context) { ++ struct create_posix *posix = (struct create_posix *)context; ++ ++ if (le16_to_cpu(context->DataOffset) + ++ le32_to_cpu(context->DataLength) < ++ sizeof(struct create_posix) - 4) { ++ rc = -EINVAL; ++ goto err_out2; ++ } ++ ksmbd_debug(SMB, "get posix context\n"); ++ ++ posix_mode = le32_to_cpu(posix->Mode); ++ posix_ctxt = true; ++ } ++ } ++ + if (req->NameLength) { + name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset), + le16_to_cpu(req->NameLength), +@@ -2911,9 +2929,11 @@ int smb2_open(struct ksmbd_work *work) + goto err_out2; + } + +- rc = ksmbd_validate_filename(name); +- if (rc < 0) +- goto err_out2; ++ if (posix_ctxt == false) { ++ rc = ksmbd_validate_filename(name); ++ if (rc < 0) ++ goto err_out2; ++ } + + if (ksmbd_share_veto_filename(share, name)) { + rc = -ENOENT; +@@ -3071,28 +3091,6 @@ int smb2_open(struct ksmbd_work *work) + rc = -EBADF; + goto err_out2; + } +- +- if (tcon->posix_extensions) { +- context = smb2_find_context_vals(req, +- SMB2_CREATE_TAG_POSIX, 16); +- if (IS_ERR(context)) { +- rc = PTR_ERR(context); +- goto err_out2; +- } else if (context) { +- struct create_posix *posix = +- (struct create_posix *)context; +- if (le16_to_cpu(context->DataOffset) + +- le32_to_cpu(context->DataLength) < +- sizeof(struct create_posix) - 4) { +- rc = -EINVAL; +- goto err_out2; +- } +- ksmbd_debug(SMB, "get posix context\n"); +- +- posix_mode = le32_to_cpu(posix->Mode); +- posix_ctxt = 1; +- } +- } + } + + if (ksmbd_override_fsids(work)) { +@@ -3526,6 +3524,15 @@ int smb2_open(struct ksmbd_work *work) + ksmbd_debug(SMB, "get query on disk id context\n"); + query_disk_id = 1; + } ++ ++ if (conn->is_aapl == false) { ++ context = smb2_find_context_vals(req, SMB2_CREATE_AAPL, 4); ++ if (IS_ERR(context)) { ++ rc = PTR_ERR(context); ++ goto err_out1; ++ } else if (context) ++ conn->is_aapl = true; ++ } + } + + rc = ksmbd_vfs_getattr(&path, &stat); +@@ -3964,7 +3971,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level, + if (dinfo->EaSize) + dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; + dinfo->Reserved = 0; +- dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); ++ if (conn->is_aapl) ++ dinfo->UniqueId = 0; ++ else ++ dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); + if (d_info->hide_dot_file && d_info->name[0] == '.') + dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE; + memcpy(dinfo->FileName, conv_name, conv_len); +@@ -3981,7 +3991,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level, + smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode); + if (fibdinfo->EaSize) + fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE; +- fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); ++ if (conn->is_aapl) ++ fibdinfo->UniqueId = 0; ++ else ++ fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino); + fibdinfo->ShortNameLength = 0; + fibdinfo->Reserved = 0; + fibdinfo->Reserved2 = cpu_to_le16(0); +diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h +index 25cc81aac350f2..2821e6c8298f4d 100644 +--- a/fs/smb/server/smb2pdu.h ++++ b/fs/smb/server/smb2pdu.h +@@ -63,6 +63,9 @@ struct 
preauth_integrity_info { + + #define SMB2_SESSION_TIMEOUT (10 * HZ) + ++/* Apple Defined Contexts */ ++#define SMB2_CREATE_AAPL "AAPL" ++ + struct create_durable_req_v2 { + struct create_context_hdr ccontext; + __u8 Name[8]; +diff --git a/include/linux/console.h b/include/linux/console.h +index 7de11c763eb35d..38571607065d76 100644 +--- a/include/linux/console.h ++++ b/include/linux/console.h +@@ -36,9 +36,14 @@ enum vc_intensity; + /** + * struct consw - callbacks for consoles + * ++ * @con_init: initialize the console on @vc. @init is true for the very first ++ * call on this @vc. ++ * @con_clear: erase @count characters at [@x, @y] on @vc. @count >= 1. + * @con_scroll: move lines from @top to @bottom in direction @dir by @lines. + * Return true if no generic handling should be done. + * Invoked by csi_M and printing to the console. ++ * @con_switch: notifier about the console switch; it is supposed to return ++ * true if a redraw is needed. + * @con_set_palette: sets the palette of the console to @table (optional) + * @con_scrolldelta: the contents of the console should be scrolled by @lines. + * Invoked by user. (optional) +@@ -46,10 +51,10 @@ enum vc_intensity; + struct consw { + struct module *owner; + const char *(*con_startup)(void); +- void (*con_init)(struct vc_data *vc, int init); ++ void (*con_init)(struct vc_data *vc, bool init); + void (*con_deinit)(struct vc_data *vc); +- void (*con_clear)(struct vc_data *vc, int sy, int sx, int height, +- int width); ++ void (*con_clear)(struct vc_data *vc, unsigned int y, ++ unsigned int x, unsigned int count); + void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos); + void (*con_putcs)(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos); +@@ -57,7 +62,7 @@ struct consw { + bool (*con_scroll)(struct vc_data *vc, unsigned int top, + unsigned int bottom, enum con_scroll dir, + unsigned int lines); +- int (*con_switch)(struct vc_data *vc); ++ bool (*con_switch)(struct vc_data *vc); + int (*con_blank)(struct vc_data *vc, int blank, int mode_switch); + int (*con_font_set)(struct vc_data *vc, struct console_font *font, + unsigned int vpitch, unsigned int flags); +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h +index b5bf5315ca8c10..e4ad9760774e14 100644 +--- a/include/linux/hyperv.h ++++ b/include/linux/hyperv.h +@@ -820,6 +820,8 @@ struct vmbus_requestor { + #define VMBUS_RQST_RESET (U64_MAX - 3) + + struct vmbus_device { ++ /* preferred ring buffer size in KB, 0 means no preferred size for this device */ ++ size_t pref_ring_size; + u16 dev_type; + guid_t guid; + bool perf_device; +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h +index d79851c5fabd86..af8a771a053c51 100644 +--- a/include/linux/ipv6.h ++++ b/include/linux/ipv6.h +@@ -199,7 +199,6 @@ struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +- u8 dontfrag:1; + }; + + /* struct ipv6_pinfo - ipv6 private area */ +diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h +index ed07181d4eff91..e05280e4152286 100644 +--- a/include/uapi/linux/vm_sockets.h ++++ b/include/uapi/linux/vm_sockets.h +@@ -17,6 +17,10 @@ + #ifndef _UAPI_VM_SOCKETS_H + #define _UAPI_VM_SOCKETS_H + ++#ifndef __KERNEL__ ++#include /* for struct sockaddr and sa_family_t */ ++#endif ++ + #include + #include + +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index e809b6d8bc5373..1aae81c57b2c9a 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -2747,6 +2747,15 @@ config FORTIFY_KUNIT_TEST + by the 
str*() and mem*() family of functions. For testing runtime + traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests. + ++config LONGEST_SYM_KUNIT_TEST ++ tristate "Test the longest symbol possible" if !KUNIT_ALL_TESTS ++ depends on KUNIT && KPROBES ++ default KUNIT_ALL_TESTS ++ help ++ Tests the longest symbol possible ++ ++ If unsure, say N. ++ + config HW_BREAKPOINT_KUNIT_TEST + bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS + depends on HAVE_HW_BREAKPOINT +diff --git a/lib/Makefile b/lib/Makefile +index 740109b6e2c89f..b9d2577fbbe190 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -402,6 +402,8 @@ obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o + obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o + obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o + obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o ++obj-$(CONFIG_LONGEST_SYM_KUNIT_TEST) += longest_symbol_kunit.o ++CFLAGS_longest_symbol_kunit.o += $(call cc-disable-warning, missing-prototypes) + + obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o + +diff --git a/lib/group_cpus.c b/lib/group_cpus.c +index ee272c4cefcc13..18d43a406114b9 100644 +--- a/lib/group_cpus.c ++++ b/lib/group_cpus.c +@@ -352,6 +352,9 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps) + int ret = -ENOMEM; + struct cpumask *masks = NULL; + ++ if (numgrps == 0) ++ return NULL; ++ + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) + return NULL; + +@@ -426,8 +429,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps) + #else /* CONFIG_SMP */ + struct cpumask *group_cpus_evenly(unsigned int numgrps) + { +- struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); ++ struct cpumask *masks; + ++ if (numgrps == 0) ++ return NULL; ++ ++ masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); + if (!masks) + return NULL; + +diff --git a/lib/longest_symbol_kunit.c b/lib/longest_symbol_kunit.c +new file mode 100644 +index 00000000000000..2fea82a6d34e5c +--- /dev/null ++++ b/lib/longest_symbol_kunit.c +@@ -0,0 +1,82 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Test the longest symbol length. Execute with: ++ * ./tools/testing/kunit/kunit.py run longest-symbol ++ * --arch=x86_64 --kconfig_add CONFIG_KPROBES=y --kconfig_add CONFIG_MODULES=y ++ * --kconfig_add CONFIG_RETPOLINE=n --kconfig_add CONFIG_CFI_CLANG=n ++ * --kconfig_add CONFIG_MITIGATION_RETPOLINE=n ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++ ++#define DI(name) s##name##name ++#define DDI(name) DI(n##name##name) ++#define DDDI(name) DDI(n##name##name) ++#define DDDDI(name) DDDI(n##name##name) ++#define DDDDDI(name) DDDDI(n##name##name) ++ ++/*Generate a symbol whose name length is 511 */ ++#define LONGEST_SYM_NAME DDDDDI(g1h2i3j4k5l6m7n) ++ ++#define RETURN_LONGEST_SYM 0xAAAAA ++ ++noinline int LONGEST_SYM_NAME(void); ++noinline int LONGEST_SYM_NAME(void) ++{ ++ return RETURN_LONGEST_SYM; ++} ++ ++_Static_assert(sizeof(__stringify(LONGEST_SYM_NAME)) == KSYM_NAME_LEN, ++"Incorrect symbol length found. 
Expected KSYM_NAME_LEN: " ++__stringify(KSYM_NAME_LEN) ", but found: " ++__stringify(sizeof(LONGEST_SYM_NAME))); ++ ++static void test_longest_symbol(struct kunit *test) ++{ ++ KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, LONGEST_SYM_NAME()); ++}; ++ ++static void test_longest_symbol_kallsyms(struct kunit *test) ++{ ++ unsigned long (*kallsyms_lookup_name)(const char *name); ++ static int (*longest_sym)(void); ++ ++ struct kprobe kp = { ++ .symbol_name = "kallsyms_lookup_name", ++ }; ++ ++ if (register_kprobe(&kp) < 0) { ++ pr_info("%s: kprobe not registered", __func__); ++ KUNIT_FAIL(test, "test_longest_symbol kallsyms: kprobe not registered\n"); ++ return; ++ } ++ ++ kunit_warn(test, "test_longest_symbol kallsyms: kprobe registered\n"); ++ kallsyms_lookup_name = (unsigned long (*)(const char *name))kp.addr; ++ unregister_kprobe(&kp); ++ ++ longest_sym = ++ (void *) kallsyms_lookup_name(__stringify(LONGEST_SYM_NAME)); ++ KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, longest_sym()); ++}; ++ ++static struct kunit_case longest_symbol_test_cases[] = { ++ KUNIT_CASE(test_longest_symbol), ++ KUNIT_CASE(test_longest_symbol_kallsyms), ++ {} ++}; ++ ++static struct kunit_suite longest_symbol_test_suite = { ++ .name = "longest-symbol", ++ .test_cases = longest_symbol_test_cases, ++}; ++kunit_test_suite(longest_symbol_test_suite); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Test the longest symbol length"); ++MODULE_AUTHOR("Sergio González Collado"); +diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c +index 26c948f87489ee..19d661889cf796 100644 +--- a/mm/damon/sysfs-schemes.c ++++ b/mm/damon/sysfs-schemes.c +@@ -376,6 +376,7 @@ static ssize_t memcg_path_store(struct kobject *kobj, + return -ENOMEM; + + strscpy(path, buf, count + 1); ++ kfree(filter->memcg_path); + filter->memcg_path = path; + return count; + } +diff --git a/net/atm/clip.c b/net/atm/clip.c +index 294cb9efe3d382..511467bb7fe40d 100644 +--- a/net/atm/clip.c ++++ b/net/atm/clip.c +@@ -193,12 +193,6 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) + + pr_debug("\n"); + +- if (!clip_devs) { +- atm_return(vcc, skb->truesize); +- kfree_skb(skb); +- return; +- } +- + if (!skb) { + pr_debug("removing VCC %p\n", clip_vcc); + if (clip_vcc->entry) +@@ -208,6 +202,11 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) + return; + } + atm_return(vcc, skb->truesize); ++ if (!clip_devs) { ++ kfree_skb(skb); ++ return; ++ } ++ + skb->dev = clip_vcc->entry ? 
clip_vcc->entry->neigh->dev : clip_devs; + /* clip_vcc->entry == NULL if we don't have an IP address yet */ + if (!skb->dev) { +diff --git a/net/atm/resources.c b/net/atm/resources.c +index 995d29e7fb138c..b19d851e1f4439 100644 +--- a/net/atm/resources.c ++++ b/net/atm/resources.c +@@ -146,11 +146,10 @@ void atm_dev_deregister(struct atm_dev *dev) + */ + mutex_lock(&atm_dev_mutex); + list_del(&dev->dev_list); +- mutex_unlock(&atm_dev_mutex); +- + atm_dev_release_vccs(dev); + atm_unregister_sysfs(dev); + atm_proc_dev_deregister(dev); ++ mutex_unlock(&atm_dev_mutex); + + atm_dev_put(dev); + } +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 2744ad11687c65..f9995a405e35c3 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -3380,7 +3380,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + struct l2cap_conf_efs efs; + u8 remote_efs = 0; +- u16 mtu = L2CAP_DEFAULT_MTU; ++ u16 mtu = 0; + u16 result = L2CAP_CONF_SUCCESS; + u16 size; + +@@ -3485,6 +3485,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data + /* Configure output options and let the other side know + * which ones we don't like. */ + ++ /* If MTU is not provided in configure request, use the most recently ++ * explicitly or implicitly accepted value for the other direction, ++ * or the default value. ++ */ ++ if (mtu == 0) ++ mtu = chan->imtu ? chan->imtu : L2CAP_DEFAULT_MTU; ++ + if (mtu < L2CAP_DEFAULT_MIN_MTU) + result = L2CAP_CONF_UNACCEPT; + else { +diff --git a/net/core/selftests.c b/net/core/selftests.c +index 7af99d07762ea0..946e92cca21110 100644 +--- a/net/core/selftests.c ++++ b/net/core/selftests.c +@@ -160,8 +160,9 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, + skb->csum = 0; + skb->ip_summed = CHECKSUM_PARTIAL; + if (attr->tcp) { +- thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, +- ihdr->daddr, 0); ++ int l4len = skb->len - skb_transport_offset(skb); ++ ++ thdr->check = ~tcp_v4_check(l4len, ihdr->saddr, ihdr->daddr, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + } else { +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 28777b14224048..c86d5dca29df01 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1452,7 +1452,6 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, + } + v6_cork->hop_limit = ipc6->hlimit; + v6_cork->tclass = ipc6->tclass; +- v6_cork->dontfrag = ipc6->dontfrag; + if (rt->dst.flags & DST_XFRM_TUNNEL) + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 
+ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); +@@ -1486,7 +1485,7 @@ static int __ip6_append_data(struct sock *sk, + int getfrag(void *from, char *to, int offset, + int len, int odd, struct sk_buff *skb), + void *from, size_t length, int transhdrlen, +- unsigned int flags) ++ unsigned int flags, struct ipcm6_cookie *ipc6) + { + struct sk_buff *skb, *skb_prev = NULL; + struct inet_cork *cork = &cork_full->base; +@@ -1542,7 +1541,7 @@ static int __ip6_append_data(struct sock *sk, + if (headersize + transhdrlen > mtu) + goto emsgsize; + +- if (cork->length + length > mtu - headersize && v6_cork->dontfrag && ++ if (cork->length + length > mtu - headersize && ipc6->dontfrag && + (sk->sk_protocol == IPPROTO_UDP || + sk->sk_protocol == IPPROTO_ICMPV6 || + sk->sk_protocol == IPPROTO_RAW)) { +@@ -1914,7 +1913,7 @@ int ip6_append_data(struct sock *sk, + + return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork, + &np->cork, sk_page_frag(sk), getfrag, +- from, length, transhdrlen, flags); ++ from, length, transhdrlen, flags, ipc6); + } + EXPORT_SYMBOL_GPL(ip6_append_data); + +@@ -2119,7 +2118,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, + err = __ip6_append_data(sk, &queue, cork, &v6_cork, + ¤t->task_frag, getfrag, from, + length + exthdrlen, transhdrlen + exthdrlen, +- flags); ++ flags, ipc6); + if (err) { + __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork); + return ERR_PTR(err); +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index 154b41af4157d0..3a3cd09bdab658 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -4753,7 +4753,7 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local, + { + u64 tsf = drv_get_tsf(local, sdata); + u64 dtim_count = 0; +- u16 beacon_int = sdata->vif.bss_conf.beacon_int * 1024; ++ u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024; + u8 dtim_period = sdata->vif.bss_conf.dtim_period; + struct ps_data *ps; + u8 bcns_from_dtim; +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index 4ffb2bcaf3648e..63756607f63272 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -2733,8 +2733,13 @@ rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) + case -EPROTONOSUPPORT: + goto out_err; + case -EACCES: +- /* Re-encode with a fresh cred */ +- fallthrough; ++ /* possible RPCSEC_GSS out-of-sequence event (RFC2203), ++ * reset recv state and keep waiting, don't retransmit ++ */ ++ task->tk_rqstp->rq_reply_bytes_recvd = 0; ++ task->tk_status = xprt_request_enqueue_receive(task); ++ task->tk_action = call_transmit_status; ++ return -EBADMSG; + default: + goto out_garbage; + } +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index 236a2cd2bc93d2..f89cd01247f6bb 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -125,6 +125,46 @@ static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; + * hash table is protected with spinlock. + * each socket state is protected by separate spinlock. + */ ++#ifdef CONFIG_PROVE_LOCKING ++#define cmp_ptr(l, r) (((l) > (r)) - ((l) < (r))) ++ ++static int unix_table_lock_cmp_fn(const struct lockdep_map *a, ++ const struct lockdep_map *b) ++{ ++ return cmp_ptr(a, b); ++} ++ ++static int unix_state_lock_cmp_fn(const struct lockdep_map *_a, ++ const struct lockdep_map *_b) ++{ ++ const struct unix_sock *a, *b; ++ ++ a = container_of(_a, struct unix_sock, lock.dep_map); ++ b = container_of(_b, struct unix_sock, lock.dep_map); ++ ++ /* unix_state_double_lock(): ascending address order. 
*/ ++ return cmp_ptr(a, b); ++} ++ ++static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a, ++ const struct lockdep_map *_b) ++{ ++ const struct sock *a, *b; ++ ++ a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map); ++ b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map); ++ ++ /* unix_collect_skb(): listener -> embryo order. */ ++ if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a) ++ return -1; ++ ++ /* Should never happen. Just to be symmetric. */ ++ if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b) ++ return 1; ++ ++ return 0; ++} ++#endif + + static unsigned int unix_unbound_hash(struct sock *sk) + { +@@ -167,7 +207,7 @@ static void unix_table_double_lock(struct net *net, + swap(hash1, hash2); + + spin_lock(&net->unx.table.locks[hash1]); +- spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING); ++ spin_lock(&net->unx.table.locks[hash2]); + } + + static void unix_table_double_unlock(struct net *net, +@@ -590,6 +630,11 @@ static void unix_sock_destructor(struct sock *sk) + #endif + } + ++static unsigned int unix_skb_len(const struct sk_buff *skb) ++{ ++ return skb->len - UNIXCB(skb).consumed; ++} ++ + static void unix_release_sock(struct sock *sk, int embrion) + { + struct unix_sock *u = unix_sk(sk); +@@ -617,20 +662,23 @@ static void unix_release_sock(struct sock *sk, int embrion) + unix_state_unlock(sk); + + #if IS_ENABLED(CONFIG_AF_UNIX_OOB) +- if (u->oob_skb) { +- kfree_skb(u->oob_skb); +- u->oob_skb = NULL; +- } ++ u->oob_skb = NULL; + #endif + + wake_up_interruptible_all(&u->peer_wait); + + if (skpair != NULL) { + if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { ++ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); ++ ++#if IS_ENABLED(CONFIG_AF_UNIX_OOB) ++ if (skb && !unix_skb_len(skb)) ++ skb = skb_peek_next(skb, &sk->sk_receive_queue); ++#endif + unix_state_lock(skpair); + /* No more writes */ + WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK); +- if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion) ++ if (skb || embrion) + WRITE_ONCE(skpair->sk_err, ECONNRESET); + unix_state_unlock(skpair); + skpair->sk_state_change(skpair); +@@ -977,12 +1025,15 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, + sk->sk_write_space = unix_write_space; + sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen); + sk->sk_destruct = unix_sock_destructor; ++ lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL); ++ + u = unix_sk(sk); + u->listener = NULL; + u->vertex = NULL; + u->path.dentry = NULL; + u->path.mnt = NULL; + spin_lock_init(&u->lock); ++ lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL); + mutex_init(&u->iolock); /* single task reading lock */ + mutex_init(&u->bindlock); /* single task binding lock */ + init_waitqueue_head(&u->peer_wait); +@@ -1331,11 +1382,12 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) + unix_state_lock(sk1); + return; + } ++ + if (sk1 > sk2) + swap(sk1, sk2); + + unix_state_lock(sk1); +- unix_state_lock_nested(sk2, U_LOCK_SECOND); ++ unix_state_lock(sk2); + } + + static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) +@@ -2145,13 +2197,9 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other + } + + maybe_add_creds(skb, sock, other); +- skb_get(skb); +- + scm_stat_add(other, skb); + + spin_lock(&other->sk_receive_queue.lock); +- if (ousk->oob_skb) +- consume_skb(ousk->oob_skb); + WRITE_ONCE(ousk->oob_skb, skb); + 
__skb_queue_tail(&other->sk_receive_queue, skb); + spin_unlock(&other->sk_receive_queue.lock); +@@ -2515,11 +2563,6 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, + return timeo; + } + +-static unsigned int unix_skb_len(const struct sk_buff *skb) +-{ +- return skb->len - UNIXCB(skb).consumed; +-} +- + struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, + struct unix_stream_read_state *); +@@ -2534,11 +2577,11 @@ struct unix_stream_read_state { + #if IS_ENABLED(CONFIG_AF_UNIX_OOB) + static int unix_stream_recv_urg(struct unix_stream_read_state *state) + { ++ struct sk_buff *oob_skb, *read_skb = NULL; + struct socket *sock = state->socket; + struct sock *sk = sock->sk; + struct unix_sock *u = unix_sk(sk); + int chunk = 1; +- struct sk_buff *oob_skb; + + mutex_lock(&u->iolock); + unix_state_lock(sk); +@@ -2553,10 +2596,15 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state) + + oob_skb = u->oob_skb; + +- if (!(state->flags & MSG_PEEK)) ++ if (!(state->flags & MSG_PEEK)) { + WRITE_ONCE(u->oob_skb, NULL); +- else +- skb_get(oob_skb); ++ ++ if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue && ++ !unix_skb_len(oob_skb->prev)) { ++ read_skb = oob_skb->prev; ++ __skb_unlink(read_skb, &sk->sk_receive_queue); ++ } ++ } + + spin_unlock(&sk->sk_receive_queue.lock); + unix_state_unlock(sk); +@@ -2566,10 +2614,10 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state) + if (!(state->flags & MSG_PEEK)) + UNIXCB(oob_skb).consumed += 1; + +- consume_skb(oob_skb); +- + mutex_unlock(&u->iolock); + ++ consume_skb(read_skb); ++ + if (chunk < 0) + return -EFAULT; + +@@ -2595,12 +2643,10 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, + if (copied) { + skb = NULL; + } else if (!(flags & MSG_PEEK)) { +- if (sock_flag(sk, SOCK_URGINLINE)) { +- WRITE_ONCE(u->oob_skb, NULL); +- consume_skb(skb); +- } else { ++ WRITE_ONCE(u->oob_skb, NULL); ++ ++ if (!sock_flag(sk, SOCK_URGINLINE)) { + __skb_unlink(skb, &sk->sk_receive_queue); +- WRITE_ONCE(u->oob_skb, NULL); + unlinked_skb = skb; + skb = skb_peek(&sk->sk_receive_queue); + } +@@ -2611,10 +2657,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, + + spin_unlock(&sk->sk_receive_queue.lock); + +- if (unlinked_skb) { +- WARN_ON_ONCE(skb_unref(unlinked_skb)); +- kfree_skb(unlinked_skb); +- } ++ kfree_skb(unlinked_skb); + } + return skb; + } +@@ -2657,7 +2700,6 @@ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) + unix_state_unlock(sk); + + if (drop) { +- WARN_ON_ONCE(skb_unref(skb)); + kfree_skb(skb); + return -EAGAIN; + } +@@ -3598,6 +3640,7 @@ static int __net_init unix_net_init(struct net *net) + + for (i = 0; i < UNIX_HASH_SIZE; i++) { + spin_lock_init(&net->unx.table.locks[i]); ++ lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL); + INIT_HLIST_HEAD(&net->unx.table.buckets[i]); + } + +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index 23efb78fe9ef4b..0068e758be4ddb 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -337,23 +337,6 @@ static bool unix_vertex_dead(struct unix_vertex *vertex) + return true; + } + +-enum unix_recv_queue_lock_class { +- U_RECVQ_LOCK_NORMAL, +- U_RECVQ_LOCK_EMBRYO, +-}; +- +-static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist) +-{ +- skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist); +- +-#if IS_ENABLED(CONFIG_AF_UNIX_OOB) +- if (u->oob_skb) { +- WARN_ON_ONCE(skb_unref(u->oob_skb)); +- 
u->oob_skb = NULL; +- } +-#endif +-} +- + static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist) + { + struct unix_vertex *vertex; +@@ -375,13 +358,12 @@ static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist + skb_queue_walk(queue, skb) { + struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue; + +- /* listener -> embryo order, the inversion never happens. */ +- spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO); +- unix_collect_queue(unix_sk(skb->sk), hitlist); ++ spin_lock(&embryo_queue->lock); ++ skb_queue_splice_init(embryo_queue, hitlist); + spin_unlock(&embryo_queue->lock); + } + } else { +- unix_collect_queue(u, hitlist); ++ skb_queue_splice_init(queue, hitlist); + } + + spin_unlock(&queue->lock); +diff --git a/rust/macros/module.rs b/rust/macros/module.rs +index 7dee348ef0cc82..7614a7198ce206 100644 +--- a/rust/macros/module.rs ++++ b/rust/macros/module.rs +@@ -249,6 +249,7 @@ mod __module_init {{ + #[cfg(MODULE)] + #[doc(hidden)] + #[no_mangle] ++ #[link_section = \".exit.text\"] + pub extern \"C\" fn cleanup_module() {{ + // SAFETY: + // - This function is inaccessible to the outside due to the double +diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl +index f27d552aec43f2..aad423c5181a96 100755 +--- a/scripts/checkstack.pl ++++ b/scripts/checkstack.pl +@@ -68,9 +68,6 @@ my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack); + # 2f60: 48 81 ec e8 05 00 00 sub $0x5e8,%rsp + $re = qr/^.*[as][du][db] \$(0x$x{1,8}),\%(e|r)sp$/o; + $dre = qr/^.*[as][du][db] (%.*),\%(e|r)sp$/o; +- } elsif ($arch eq 'ia64') { +- #e0000000044011fc: 01 0f fc 8c adds r12=-384,r12 +- $re = qr/.*adds.*r12=-(([0-9]{2}|[3-9])[0-9]{2}),r12/o; + } elsif ($arch eq 'm68k') { + # 2b6c: 4e56 fb70 linkw %fp,#-1168 + # 1df770: defc ffe4 addaw #-28,%sp +diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py +index 17ec19e9b5bf6a..5be53b372a6938 100644 +--- a/scripts/gdb/linux/tasks.py ++++ b/scripts/gdb/linux/tasks.py +@@ -86,21 +86,12 @@ LxPs() + + thread_info_type = utils.CachedType("struct thread_info") + +-ia64_task_size = None +- + + def get_thread_info(task): + thread_info_ptr_type = thread_info_type.get_type().pointer() +- if utils.is_target_arch("ia64"): +- global ia64_task_size +- if ia64_task_size is None: +- ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)") +- thread_info_addr = task.address + ia64_task_size +- thread_info = thread_info_addr.cast(thread_info_ptr_type) +- else: +- if task.type.fields()[0].type == thread_info_type.get_type(): +- return task['thread_info'] +- thread_info = task['stack'].cast(thread_info_ptr_type) ++ if task.type.fields()[0].type == thread_info_type.get_type(): ++ return task['thread_info'] ++ thread_info = task['stack'].cast(thread_info_ptr_type) + return thread_info.dereference() + + +diff --git a/scripts/head-object-list.txt b/scripts/head-object-list.txt +index 26359968744ef1..890f69005bab41 100644 +--- a/scripts/head-object-list.txt ++++ b/scripts/head-object-list.txt +@@ -17,7 +17,6 @@ arch/arm/kernel/head-nommu.o + arch/arm/kernel/head.o + arch/csky/kernel/head.o + arch/hexagon/kernel/head.o +-arch/ia64/kernel/head.o + arch/loongarch/kernel/head.o + arch/m68k/68000/head.o + arch/m68k/coldfire/head.o +diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c +index eccc87a441e713..3795c36a9181aa 100644 +--- a/scripts/kconfig/mconf.c ++++ b/scripts/kconfig/mconf.c +@@ -247,7 +247,7 @@ search_help[] = + " -> PCI support (PCI [=y])\n" + "(1) -> PCI access 
mode ( [=y])\n" + " Defined at drivers/pci/Kconfig:47\n" +- " Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n" ++ " Depends on: X86_LOCAL_APIC && X86_IO_APIC\n" + " Selects: LIBCRC32\n" + " Selected by: BAR [=n]\n" + "-----------------------------------------------------------------\n" +diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c +index 143a2c351d5764..8cd72fe2597405 100644 +--- a/scripts/kconfig/nconf.c ++++ b/scripts/kconfig/nconf.c +@@ -216,7 +216,7 @@ search_help[] = + "Symbol: FOO [ = m]\n" + "Prompt: Foo bus is used to drive the bar HW\n" + "Defined at drivers/pci/Kconfig:47\n" +-"Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n" ++"Depends on: X86_LOCAL_APIC && X86_IO_APIC\n" + "Location:\n" + " -> Bus options (PCI, PCMCIA, EISA, ISA)\n" + " -> PCI support (PCI [ = y])\n" +diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec +index 3eee0143e0c5cc..f58726671fb374 100644 +--- a/scripts/package/kernel.spec ++++ b/scripts/package/kernel.spec +@@ -55,18 +55,12 @@ patch -p1 < %{SOURCE2} + %{make} %{makeflags} KERNELRELEASE=%{KERNELRELEASE} KBUILD_BUILD_VERSION=%{release} + + %install +-mkdir -p %{buildroot}/boot +-%ifarch ia64 +-mkdir -p %{buildroot}/boot/efi +-cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/efi/vmlinuz-%{KERNELRELEASE} +-ln -s efi/vmlinuz-%{KERNELRELEASE} %{buildroot}/boot/ +-%else +-cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/vmlinuz-%{KERNELRELEASE} +-%endif ++mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE} ++cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz + %{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} modules_install + %{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install +-cp System.map %{buildroot}/boot/System.map-%{KERNELRELEASE} +-cp .config %{buildroot}/boot/config-%{KERNELRELEASE} ++cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE} ++cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config + ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/build + %if %{with_devel} + %{make} %{makeflags} run-command KBUILD_RUN_COMMAND='${srctree}/scripts/package/install-extmod-build %{buildroot}/usr/src/kernels/%{KERNELRELEASE}' +@@ -76,13 +70,14 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA + rm -rf %{buildroot} + + %post +-if [ -x /sbin/installkernel -a -r /boot/vmlinuz-%{KERNELRELEASE} -a -r /boot/System.map-%{KERNELRELEASE} ]; then +-cp /boot/vmlinuz-%{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm +-cp /boot/System.map-%{KERNELRELEASE} /boot/.System.map-%{KERNELRELEASE}-rpm +-rm -f /boot/vmlinuz-%{KERNELRELEASE} /boot/System.map-%{KERNELRELEASE} +-/sbin/installkernel %{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm +-rm -f /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm ++if [ -x /usr/bin/kernel-install ]; then ++ /usr/bin/kernel-install add %{KERNELRELEASE} /lib/modules/%{KERNELRELEASE}/vmlinuz + fi ++for file in vmlinuz System.map config; do ++ if ! 
cmp --silent "/lib/modules/%{KERNELRELEASE}/${file}" "/boot/${file}-%{KERNELRELEASE}"; then ++ cp "/lib/modules/%{KERNELRELEASE}/${file}" "/boot/${file}-%{KERNELRELEASE}" ++ fi ++done + + %preun + if [ -x /sbin/new-kernel-pkg ]; then +@@ -100,7 +95,6 @@ fi + %defattr (-, root, root) + /lib/modules/%{KERNELRELEASE} + %exclude /lib/modules/%{KERNELRELEASE}/build +-/boot/* + + %files headers + %defattr (-, root, root) +diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian +index 5044224cf6714b..c1a36da85e84f6 100755 +--- a/scripts/package/mkdebian ++++ b/scripts/package/mkdebian +@@ -26,7 +26,7 @@ set_debarch() { + + # Attempt to find the correct Debian architecture + case "$UTS_MACHINE" in +- i386|ia64|alpha|m68k|riscv*) ++ i386|alpha|m68k|riscv*) + debarch="$UTS_MACHINE" ;; + x86_64) + debarch=amd64 ;; +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c +index 40ae6b2c7a6da5..3e4f54799cc0a5 100644 +--- a/scripts/recordmcount.c ++++ b/scripts/recordmcount.c +@@ -590,7 +590,6 @@ static int do_file(char const *const fname) + ideal_nop = ideal_nop4_arm64; + is_fake_mcount64 = arm64_is_fake_mcount; + break; +- case EM_IA_64: reltype = R_IA64_IMM64; break; + case EM_MIPS: /* reltype: e_class */ break; + case EM_LOONGARCH: /* reltype: e_class */ break; + case EM_PPC: reltype = R_PPC_ADDR32; break; +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl +index 6a4645a5797603..f84df9e383fd0a 100755 +--- a/scripts/recordmcount.pl ++++ b/scripts/recordmcount.pl +@@ -275,13 +275,6 @@ if ($arch eq "x86_64") { + $section_type = '%progbits'; + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+_mcount\$"; + $type = ".quad"; +-} elsif ($arch eq "ia64") { +- $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$"; +- $type = "data8"; +- +- if ($is_module eq "0") { +- $cc .= " -mconstant-gp"; +- } + } elsif ($arch eq "sparc64") { + # In the objdump output there are giblets like: + # 0000000000000000 : +diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh +index 76e9cbcfbeab45..d06baf626abe79 100755 +--- a/scripts/xz_wrap.sh ++++ b/scripts/xz_wrap.sh +@@ -15,7 +15,6 @@ LZMA2OPTS= + case $SRCARCH in + x86) BCJ=--x86 ;; + powerpc) BCJ=--powerpc ;; +- ia64) BCJ=--ia64; LZMA2OPTS=pb=4 ;; + arm) BCJ=--arm ;; + sparc) BCJ=--sparc ;; + esac +diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c +index b7ca2a83fbb086..95786bdadfe6a5 100644 +--- a/sound/pci/hda/hda_bind.c ++++ b/sound/pci/hda/hda_bind.c +@@ -44,7 +44,7 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev) + struct hda_codec *codec = container_of(dev, struct hda_codec, core); + + /* ignore unsol events during shutdown */ +- if (codec->bus->shutdown) ++ if (codec->card->shutdown || codec->bus->shutdown) + return; + + /* ignore unsol events during system suspend/resume */ +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 3cd5b7da8e1528..059693e03fd962 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2727,6 +2727,9 @@ static const struct pci_device_id azx_ids[] = { + { PCI_VDEVICE(ATI, 0xab38), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | + AZX_DCAPS_PM_RUNTIME }, ++ { PCI_VDEVICE(ATI, 0xab40), ++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | ++ AZX_DCAPS_PM_RUNTIME }, + /* GLENFLY */ + { PCI_DEVICE(PCI_VENDOR_ID_GLENFLY, PCI_ANY_ID), + .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 
82210b1e3b9782..0d367cec03adef 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10325,6 +10325,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), ++ SND_PCI_QUIRK(0x1043, 0x1e10, "ASUS VivoBook X507UAR", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), + SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index 9fdee74c28df27..40e2b5a87916a8 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -353,6 +353,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "83J2"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "83J3"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c +index a05b553e6472f4..8d5d186fd58022 100644 +--- a/sound/soc/codecs/wcd9335.c ++++ b/sound/soc/codecs/wcd9335.c +@@ -16,7 +16,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +@@ -329,8 +329,7 @@ struct wcd9335_codec { + int comp_enabled[COMPANDER_MAX]; + + int intr1; +- int reset_gpio; +- struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY]; ++ struct gpio_desc *reset_gpio; + + unsigned int rx_port_value[WCD9335_RX_MAX]; + unsigned int tx_port_value[WCD9335_TX_MAX]; +@@ -357,6 +356,10 @@ struct wcd9335_irq { + char *name; + }; + ++static const char * const wcd9335_supplies[] = { ++ "vdd-buck", "vdd-buck-sido", "vdd-tx", "vdd-rx", "vdd-io", ++}; ++ + static const struct wcd9335_slim_ch wcd9335_tx_chs[WCD9335_TX_MAX] = { + WCD9335_SLIM_TX_CH(0), + WCD9335_SLIM_TX_CH(1), +@@ -5032,53 +5035,30 @@ static const struct regmap_irq_chip wcd9335_regmap_irq1_chip = { + static int wcd9335_parse_dt(struct wcd9335_codec *wcd) + { + struct device *dev = wcd->dev; +- struct device_node *np = dev->of_node; + int ret; + +- wcd->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0); +- if (wcd->reset_gpio < 0) { +- dev_err(dev, "Reset GPIO missing from DT\n"); +- return wcd->reset_gpio; +- } ++ wcd->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); ++ if (IS_ERR(wcd->reset_gpio)) ++ return dev_err_probe(dev, PTR_ERR(wcd->reset_gpio), "Reset GPIO missing from DT\n"); + + wcd->mclk = devm_clk_get(dev, "mclk"); +- if (IS_ERR(wcd->mclk)) { +- dev_err(dev, "mclk not found\n"); +- return PTR_ERR(wcd->mclk); +- } ++ if (IS_ERR(wcd->mclk)) ++ return dev_err_probe(dev, PTR_ERR(wcd->mclk), "mclk not found\n"); + + wcd->native_clk = devm_clk_get(dev, "slimbus"); +- if (IS_ERR(wcd->native_clk)) { +- dev_err(dev, "slimbus clock not found\n"); +- return PTR_ERR(wcd->native_clk); +- } ++ if (IS_ERR(wcd->native_clk)) ++ return dev_err_probe(dev, PTR_ERR(wcd->native_clk), "slimbus clock not found\n"); + +- wcd->supplies[0].supply = "vdd-buck"; +- wcd->supplies[1].supply = "vdd-buck-sido"; +- wcd->supplies[2].supply = "vdd-tx"; +- wcd->supplies[3].supply = "vdd-rx"; +- wcd->supplies[4].supply = "vdd-io"; +- +- ret = regulator_bulk_get(dev, 
WCD9335_MAX_SUPPLY, wcd->supplies); +- if (ret) { +- dev_err(dev, "Failed to get supplies: err = %d\n", ret); +- return ret; +- } ++ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(wcd9335_supplies), ++ wcd9335_supplies); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to get and enable supplies\n"); + + return 0; + } + + static int wcd9335_power_on_reset(struct wcd9335_codec *wcd) + { +- struct device *dev = wcd->dev; +- int ret; +- +- ret = regulator_bulk_enable(WCD9335_MAX_SUPPLY, wcd->supplies); +- if (ret) { +- dev_err(dev, "Failed to get supplies: err = %d\n", ret); +- return ret; +- } +- + /* + * For WCD9335, it takes about 600us for the Vout_A and + * Vout_D to be ready after BUCK_SIDO is powered up. +@@ -5088,9 +5068,9 @@ static int wcd9335_power_on_reset(struct wcd9335_codec *wcd) + */ + usleep_range(600, 650); + +- gpio_direction_output(wcd->reset_gpio, 0); ++ gpiod_set_value(wcd->reset_gpio, 1); + msleep(20); +- gpio_set_value(wcd->reset_gpio, 1); ++ gpiod_set_value(wcd->reset_gpio, 0); + msleep(20); + + return 0; +@@ -5163,10 +5143,8 @@ static int wcd9335_slim_probe(struct slim_device *slim) + + wcd->dev = dev; + ret = wcd9335_parse_dt(wcd); +- if (ret) { +- dev_err(dev, "Error parsing DT: %d\n", ret); ++ if (ret) + return ret; +- } + + ret = wcd9335_power_on_reset(wcd); + if (ret) +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 0b8b20550ab381..f19c808444c97a 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -2182,6 +2182,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { + QUIRK_FLAG_DISABLE_AUTOSUSPEND), + DEVICE_FLG(0x17aa, 0x104d, /* Lenovo ThinkStation P620 Internal Speaker + Front Headset */ + QUIRK_FLAG_DISABLE_AUTOSUSPEND), ++ DEVICE_FLG(0x17ef, 0x3083, /* Lenovo TBT3 dock */ ++ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x1852, 0x5062, /* Luxman D-08u */ + QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY), + DEVICE_FLG(0x1852, 0x5065, /* Luxman DA-06 */ +diff --git a/sound/usb/stream.c b/sound/usb/stream.c +index e14c725acebf2c..0f1558ef855535 100644 +--- a/sound/usb/stream.c ++++ b/sound/usb/stream.c +@@ -982,6 +982,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, + * and request Cluster Descriptor + */ + wLength = le16_to_cpu(hc_header.wLength); ++ if (wLength < sizeof(cluster)) ++ return NULL; + cluster = kzalloc(wLength, GFP_KERNEL); + if (!cluster) + return ERR_PTR(-ENOMEM); +diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c +index ebf56d21d08eed..cf4db51b99eb5d 100644 +--- a/tools/lib/bpf/btf_dump.c ++++ b/tools/lib/bpf/btf_dump.c +@@ -225,6 +225,9 @@ static void btf_dump_free_names(struct hashmap *map) + size_t bkt; + struct hashmap_entry *cur; + ++ if (!map) ++ return; ++ + hashmap__for_each_entry(map, cur, bkt) + free((void *)cur->pkey); + +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c +index 5dc2e555533586..aefbfa2df6207c 100644 +--- a/tools/lib/bpf/libbpf.c ++++ b/tools/lib/bpf/libbpf.c +@@ -554,7 +554,7 @@ struct extern_desc { + int sym_idx; + int btf_id; + int sec_btf_id; +- const char *name; ++ char *name; + char *essent_name; + bool is_set; + bool is_weak; +@@ -3822,7 +3822,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj) + return ext->btf_id; + } + t = btf__type_by_id(obj->btf, ext->btf_id); +- ext->name = btf__name_by_offset(obj->btf, t->name_off); ++ ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off)); ++ if (!ext->name) ++ return -ENOMEM; + ext->sym_idx = i; + ext->is_weak = ELF64_ST_BIND(sym->st_info) == 
STB_WEAK; + +@@ -8457,8 +8459,10 @@ void bpf_object__close(struct bpf_object *obj) + zfree(&obj->btf_custom_path); + zfree(&obj->kconfig); + +- for (i = 0; i < obj->nr_extern; i++) ++ for (i = 0; i < obj->nr_extern; i++) { ++ zfree(&obj->externs[i].name); + zfree(&obj->externs[i].essent_name); ++ } + + zfree(&obj->externs); + obj->nr_extern = 0; +diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c +index 1fbb73d3e5d5a0..9be0e32cfeeea4 100644 +--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c ++++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c +@@ -31,6 +31,16 @@ int my_int_last SEC(".data.array_not_last"); + + int percpu_arr[1] SEC(".data.percpu_arr"); + ++/* at least one extern is included, to ensure that a specific ++ * regression is tested whereby resizing resulted in a free-after-use ++ * bug after type information is invalidated by the resize operation. ++ * ++ * There isn't a particularly good API to test for this specific condition, ++ * but by having externs for the resizing tests it will cover this path. ++ */ ++extern int LINUX_KERNEL_VERSION __kconfig; ++long version_sink; ++ + SEC("tp/syscalls/sys_enter_getpid") + int bss_array_sum(void *ctx) + { +@@ -43,6 +53,9 @@ int bss_array_sum(void *ctx) + for (size_t i = 0; i < bss_array_len; ++i) + sum += array[i]; + ++ /* see above; ensure this is not optimized out */ ++ version_sink = LINUX_KERNEL_VERSION; ++ + return 0; + } + +@@ -58,5 +71,8 @@ int data_array_sum(void *ctx) + for (size_t i = 0; i < data_array_len; ++i) + sum += my_array[i]; + ++ /* see above; ensure this is not optimized out */ ++ version_sink = LINUX_KERNEL_VERSION; ++ + return 0; + } diff --git a/patch/kernel/archive/spacemit-6.6/patch-6.6.96-97.patch b/patch/kernel/archive/spacemit-6.6/patch-6.6.96-97.patch new file mode 100644 index 000000000..36cf6a967 --- /dev/null +++ b/patch/kernel/archive/spacemit-6.6/patch-6.6.96-97.patch @@ -0,0 +1,6094 @@ +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index 0426ec112155ec..868ec736a9d235 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -526,6 +526,7 @@ What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/spectre_v1 + /sys/devices/system/cpu/vulnerabilities/spectre_v2 + /sys/devices/system/cpu/vulnerabilities/srbds ++ /sys/devices/system/cpu/vulnerabilities/tsa + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort + Date: January 2018 + Contact: Linux kernel mailing list +diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs +index 0c7efaf62de0c0..84131641580c95 100644 +--- a/Documentation/ABI/testing/sysfs-driver-ufs ++++ b/Documentation/ABI/testing/sysfs-driver-ufs +@@ -711,7 +711,7 @@ Description: This file shows the thin provisioning type. This is one of + + The file is read only. + +-What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resourse_count ++What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resource_count + Date: February 2018 + Contact: Stanislav Nijnikov + Description: This file shows the total physical memory resources. 
This is +diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst +index c98fd11907cc87..e916dc232b0f0c 100644 +--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst ++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst +@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in + combination with a microcode update. The microcode clears the affected CPU + buffers when the VERW instruction is executed. + +-Kernel reuses the MDS function to invoke the buffer clearing: +- +- mds_clear_cpu_buffers() ++Kernel does the buffer clearing with x86_clear_cpu_buffers(). + + On MDS affected CPUs, the kernel already invokes CPU buffer clear on + kernel/userspace, hypervisor/guest and C-state (idle) transitions. No +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index f95734ceb82b86..bcfa49019c3f16 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -6645,6 +6645,19 @@ + If not specified, "default" is used. In this case, + the RNG's choice is left to each individual trust source. + ++ tsa= [X86] Control mitigation for Transient Scheduler ++ Attacks on AMD CPUs. Search the following in your ++ favourite search engine for more details: ++ ++ "Technical guidance for mitigating transient scheduler ++ attacks". ++ ++ off - disable the mitigation ++ on - enable the mitigation (default) ++ user - mitigate only user/kernel transitions ++ vm - mitigate only guest/host transitions ++ ++ + tsc= Disable clocksource stability checks for TSC. + Format: + [x86] reliable: mark tsc clocksource as reliable, this +diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst +index c58c72362911cd..43106f349cc35f 100644 +--- a/Documentation/arch/x86/mds.rst ++++ b/Documentation/arch/x86/mds.rst +@@ -93,7 +93,7 @@ enters a C-state. + + The kernel provides a function to invoke the buffer clearing: + +- mds_clear_cpu_buffers() ++ x86_clear_cpu_buffers() + + Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path. + Other than CFLAGS.ZF, this macro doesn't clobber any registers. +@@ -185,9 +185,9 @@ Mitigation points + idle clearing would be a window dressing exercise and is therefore not + activated. + +- The invocation is controlled by the static key mds_idle_clear which is +- switched depending on the chosen mitigation mode and the SMT state of +- the system. ++ The invocation is controlled by the static key cpu_buf_idle_clear which is ++ switched depending on the chosen mitigation mode and the SMT state of the ++ system. + + The buffer clear is only invoked before entering the C-State to prevent + that stale data from the idling CPU from spilling to the Hyper-Thread +diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst +index 12e4aecdae9452..29875e25e376f6 100644 +--- a/Documentation/core-api/symbol-namespaces.rst ++++ b/Documentation/core-api/symbol-namespaces.rst +@@ -28,6 +28,9 @@ kernel. As of today, modules that make use of symbols exported into namespaces, + are required to import the namespace. Otherwise the kernel will, depending on + its configuration, reject loading the module or warn about a missing import. 
+ ++Additionally, it is possible to put symbols into a module namespace, strictly ++limiting which modules are allowed to use these symbols. ++ + 2. How to define Symbol Namespaces + ================================== + +@@ -84,6 +87,22 @@ unit as preprocessor statement. The above example would then read:: + within the corresponding compilation unit before any EXPORT_SYMBOL macro is + used. + ++2.3 Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro ++=================================================== ++ ++Symbols exported using this macro are put into a module namespace. This ++namespace cannot be imported. ++ ++The macro takes a comma separated list of module names, allowing only those ++modules to access this symbol. Simple tail-globs are supported. ++ ++For example: ++ ++ EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*") ++ ++will limit usage of this symbol to modules whoes name matches the given ++patterns. ++ + 3. How to use Symbols exported in Namespaces + ============================================ + +@@ -155,3 +174,6 @@ in-tree modules:: + You can also run nsdeps for external module builds. A typical usage is:: + + $ make -C M=$PWD nsdeps ++ ++Note: it will happily generate an import statement for the module namespace; ++which will not work and generates build and runtime failures. +diff --git a/Makefile b/Makefile +index 038fc8e0982bdc..9d5c08363637bd 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 96 ++SUBLEVEL = 97 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi +index 5988a4eb6efaa0..cb78ce7af0b380 100644 +--- a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi ++++ b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi +@@ -71,7 +71,7 @@ hpm1: usb-pd@3f { + */ + &port00 { + bus-range = <1 1>; +- wifi0: network@0,0 { ++ wifi0: wifi@0,0 { + compatible = "pci14e4,4425"; + reg = <0x10000 0x0 0x0 0x0 0x0>; + /* To be filled by the loader */ +diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi +index c14c6f8583d548..2f0f1c2ab7391f 100644 +--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi +@@ -1064,6 +1064,20 @@ spi13: spi@894000 { + status = "disabled"; + }; + ++ uart14: serial@898000 { ++ compatible = "qcom,geni-uart"; ++ reg = <0 0x898000 0 0x4000>; ++ clock-names = "se"; ++ clocks = <&gcc GCC_QUPV3_WRAP2_S6_CLK>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&qup_uart14_default>, <&qup_uart14_cts_rts>; ++ interrupts = ; ++ interconnects = <&clk_virt MASTER_QUP_CORE_2 0 &clk_virt SLAVE_QUP_CORE_2 0>, ++ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_QUP_2 0>; ++ interconnect-names = "qup-core", "qup-config"; ++ status = "disabled"; ++ }; ++ + i2c15: i2c@89c000 { + compatible = "qcom,geni-i2c"; + reg = <0 0x0089c000 0 0x4000>; +@@ -3640,6 +3654,22 @@ qup_uart7_default: qup-uart7-default-state { + bias-disable; + }; + ++ qup_uart14_default: qup-uart14-default-state { ++ /* TX, RX */ ++ pins = "gpio78", "gpio79"; ++ function = "qup2_se6"; ++ drive-strength = <2>; ++ bias-pull-up; ++ }; ++ ++ qup_uart14_cts_rts: qup-uart14-cts-rts-state { ++ /* CTS, RTS */ ++ pins = "gpio76", "gpio77"; ++ function = "qup2_se6"; ++ drive-strength = <2>; ++ bias-pull-down; ++ }; ++ + sdc2_sleep: sdc2-sleep-state { + clk-pins { + pins = "sdc2_clk"; +diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h +index 
2c145da3b774a1..b5211e413829a2 100644 +--- a/arch/powerpc/include/uapi/asm/ioctls.h ++++ b/arch/powerpc/include/uapi/asm/ioctls.h +@@ -23,10 +23,10 @@ + #define TCSETSW _IOW('t', 21, struct termios) + #define TCSETSF _IOW('t', 22, struct termios) + +-#define TCGETA _IOR('t', 23, struct termio) +-#define TCSETA _IOW('t', 24, struct termio) +-#define TCSETAW _IOW('t', 25, struct termio) +-#define TCSETAF _IOW('t', 28, struct termio) ++#define TCGETA 0x40147417 /* _IOR('t', 23, struct termio) */ ++#define TCSETA 0x80147418 /* _IOW('t', 24, struct termio) */ ++#define TCSETAW 0x80147419 /* _IOW('t', 25, struct termio) */ ++#define TCSETAF 0x8014741c /* _IOW('t', 28, struct termio) */ + + #define TCSBRK _IO('t', 29) + #define TCXONC _IO('t', 30) +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile +index b7629122680b1e..131c859b24679e 100644 +--- a/arch/powerpc/kernel/Makefile ++++ b/arch/powerpc/kernel/Makefile +@@ -165,9 +165,7 @@ endif + + obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o + +-ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)$(CONFIG_PPC_BOOK3S),) + obj-y += ppc_save_regs.o +-endif + + obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o + obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o +diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c +index b3961f1016ea0b..d969f36bf186f2 100644 +--- a/arch/s390/pci/pci_event.c ++++ b/arch/s390/pci/pci_event.c +@@ -98,6 +98,10 @@ static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev, + struct zpci_dev *zdev = to_zpci(pdev); + int rc; + ++ /* The underlying device may have been disabled by the event */ ++ if (!zdev_enabled(zdev)) ++ return PCI_ERS_RESULT_NEED_RESET; ++ + pr_info("%s: Unblocking device access for examination\n", pci_name(pdev)); + rc = zpci_reset_load_store_blocked(zdev); + if (rc) { +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 4372657ab0d6fa..caa6adcedc18dd 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -2621,6 +2621,15 @@ config MITIGATION_ITS + disabled, mitigation cannot be enabled via cmdline. + See + ++config MITIGATION_TSA ++ bool "Mitigate Transient Scheduler Attacks" ++ depends on CPU_SUP_AMD ++ default y ++ help ++ Enable mitigation for Transient Scheduler Attacks. TSA is a hardware ++ security vulnerability on AMD CPUs which can lead to forwarding of ++ invalid info to subsequent instructions and thus can affect their ++ timing and thereby cause a leakage. + endif + + config ARCH_HAS_ADD_PAGES +diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S +index ad292c0d971a3f..4e7ecffee762ad 100644 +--- a/arch/x86/entry/entry.S ++++ b/arch/x86/entry/entry.S +@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb); + + /* + * Define the VERW operand that is disguised as entry code so that +- * it can be referenced with KPTI enabled. This ensure VERW can be ++ * it can be referenced with KPTI enabled. This ensures VERW can be + * used late in exit-to-user path after page tables are switched. 
+ */ + .pushsection .entry.text, "ax" + + .align L1_CACHE_BYTES, 0xcc +-SYM_CODE_START_NOALIGN(mds_verw_sel) ++SYM_CODE_START_NOALIGN(x86_verw_sel) + UNWIND_HINT_UNDEFINED + ANNOTATE_NOENDBR + .word __KERNEL_DS + .align L1_CACHE_BYTES, 0xcc +-SYM_CODE_END(mds_verw_sel); ++SYM_CODE_END(x86_verw_sel); + /* For KVM */ +-EXPORT_SYMBOL_GPL(mds_verw_sel); ++EXPORT_SYMBOL_GPL(x86_verw_sel); + + .popsection + +diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h +index fecc4fe1d68aff..9c67f8b4c91971 100644 +--- a/arch/x86/include/asm/cpu.h ++++ b/arch/x86/include/asm/cpu.h +@@ -81,4 +81,16 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type); + + extern struct cpumask cpus_stop_mask; + ++union zen_patch_rev { ++ struct { ++ __u32 rev : 8, ++ stepping : 4, ++ model : 4, ++ __reserved : 4, ++ ext_model : 4, ++ ext_fam : 8; ++ }; ++ __u32 ucode_rev; ++}; ++ + #endif /* _ASM_X86_CPU_H */ +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 8a2482651a6f1e..311cc58f29581d 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -449,6 +449,7 @@ + /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ + #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ + #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */ ++#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* "" The memory form of VERW mitigates TSA */ + #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */ + #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ + #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ +@@ -470,6 +471,10 @@ + #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ + #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */ + ++#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */ ++#define X86_FEATURE_TSA_L1_NO (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */ ++#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */ ++ + /* + * BUG word(s) + */ +@@ -521,4 +526,5 @@ + #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ + #define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */ + #define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */ ++#define X86_BUG_TSA X86_BUG(1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */ + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index 9acfe2bcf1fd5b..9bfb7b90e2990e 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void) + + static __always_inline void native_safe_halt(void) + { +- mds_idle_clear_cpu_buffers(); ++ x86_idle_clear_cpu_buffers(); + asm volatile("sti; hlt": : :"memory"); + } + + static __always_inline void native_halt(void) + { +- mds_idle_clear_cpu_buffers(); ++ x86_idle_clear_cpu_buffers(); + asm volatile("hlt": : :"memory"); + } + +diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h +index a541411d9226ef..ae7a83e3f743e0 100644 +--- a/arch/x86/include/asm/mwait.h ++++ 
b/arch/x86/include/asm/mwait.h +@@ -44,8 +44,6 @@ static __always_inline void __monitorx(const void *eax, unsigned long ecx, + + static __always_inline void __mwait(unsigned long eax, unsigned long ecx) + { +- mds_idle_clear_cpu_buffers(); +- + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +@@ -80,7 +78,7 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx) + static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, + unsigned long ecx) + { +- /* No MDS buffer clear as this is AMD/HYGON only */ ++ /* No need for TSA buffer clearing on AMD */ + + /* "mwaitx %eax, %ebx, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfb;" +@@ -89,7 +87,7 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, + + static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx) + { +- mds_idle_clear_cpu_buffers(); ++ + /* "mwait %eax, %ecx;" */ + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +@@ -107,21 +105,29 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx) + */ + static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) + { ++ if (need_resched()) ++ return; ++ ++ x86_idle_clear_cpu_buffers(); ++ + if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { + const void *addr = ¤t_thread_info()->flags; + + alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); + __monitor(addr, 0, 0); + +- if (!need_resched()) { +- if (ecx & 1) { +- __mwait(eax, ecx); +- } else { +- __sti_mwait(eax, ecx); +- raw_local_irq_disable(); +- } ++ if (need_resched()) ++ goto out; ++ ++ if (ecx & 1) { ++ __mwait(eax, ecx); ++ } else { ++ __sti_mwait(eax, ecx); ++ raw_local_irq_disable(); + } + } ++ ++out: + current_clr_polling(); + } + +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index bc4fa6d09d29d9..04f5a41c3a04ed 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -324,25 +324,31 @@ + .endm + + /* +- * Macro to execute VERW instruction that mitigate transient data sampling +- * attacks such as MDS. On affected systems a microcode update overloaded VERW +- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF. +- * ++ * Macro to execute VERW insns that mitigate transient data sampling ++ * attacks such as MDS or TSA. On affected systems a microcode update ++ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers ++ * CFLAGS.ZF. + * Note: Only the memory operand variant of VERW clears the CPU buffers. + */ +-.macro CLEAR_CPU_BUFFERS ++.macro __CLEAR_CPU_BUFFERS feature + #ifdef CONFIG_X86_64 +- ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF ++ ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature + #else + /* + * In 32bit mode, the memory operand must be a %cs reference. The data + * segments may not be usable (vm86 mode), and the stack segment may not + * be flat (ESPFIX32). 
+ */ +- ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF ++ ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature + #endif + .endm + ++#define CLEAR_CPU_BUFFERS \ ++ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF ++ ++#define VM_CLEAR_CPU_BUFFERS \ ++ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM ++ + #ifdef CONFIG_X86_64 + .macro CLEAR_BRANCH_HISTORY + ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP +@@ -592,24 +598,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + +-DECLARE_STATIC_KEY_FALSE(mds_idle_clear); ++DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear); + + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); + + DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); + +-extern u16 mds_verw_sel; ++extern u16 x86_verw_sel; + + #include + + /** +- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability ++ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns + * + * This uses the otherwise unused and obsolete VERW instruction in + * combination with microcode which triggers a CPU buffer flush when the + * instruction is executed. + */ +-static __always_inline void mds_clear_cpu_buffers(void) ++static __always_inline void x86_clear_cpu_buffers(void) + { + static const u16 ds = __KERNEL_DS; + +@@ -626,14 +632,15 @@ static __always_inline void mds_clear_cpu_buffers(void) + } + + /** +- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS ++ * and TSA vulnerabilities. + * + * Clear CPU buffers if the corresponding static key is enabled + */ +-static __always_inline void mds_idle_clear_cpu_buffers(void) ++static __always_inline void x86_idle_clear_cpu_buffers(void) + { +- if (static_branch_likely(&mds_idle_clear)) +- mds_clear_cpu_buffers(); ++ if (static_branch_likely(&cpu_buf_idle_clear)) ++ x86_clear_cpu_buffers(); + } + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/x86/include/uapi/asm/debugreg.h b/arch/x86/include/uapi/asm/debugreg.h +index 0007ba077c0c2b..41da492dfb01f0 100644 +--- a/arch/x86/include/uapi/asm/debugreg.h ++++ b/arch/x86/include/uapi/asm/debugreg.h +@@ -15,7 +15,26 @@ + which debugging register was responsible for the trap. The other bits + are either reserved or not of interest to us. */ + +-/* Define reserved bits in DR6 which are always set to 1 */ ++/* ++ * Define bits in DR6 which are set to 1 by default. ++ * ++ * This is also the DR6 architectural value following Power-up, Reset or INIT. ++ * ++ * Note, with the introduction of Bus Lock Detection (BLD) and Restricted ++ * Transactional Memory (RTM), the DR6 register has been modified: ++ * ++ * 1) BLD flag (bit 11) is no longer reserved to 1 if the CPU supports ++ * Bus Lock Detection. The assertion of a bus lock could clear it. ++ * ++ * 2) RTM flag (bit 16) is no longer reserved to 1 if the CPU supports ++ * restricted transactional memory. #DB occurred inside an RTM region ++ * could clear it. ++ * ++ * Apparently, DR6.BLD and DR6.RTM are active low bits. ++ * ++ * As a result, DR6_RESERVED is an incorrect name now, but it is kept for ++ * compatibility. 
++ */ + #define DR6_RESERVED (0xFFFF0FF0) + + #define DR_TRAP0 (0x1) /* db0 */ +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 498f2753777292..1180689a239037 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -539,6 +539,63 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c) + #endif + } + ++static bool amd_check_tsa_microcode(void) ++{ ++ struct cpuinfo_x86 *c = &boot_cpu_data; ++ union zen_patch_rev p; ++ u32 min_rev = 0; ++ ++ p.ext_fam = c->x86 - 0xf; ++ p.model = c->x86_model; ++ p.stepping = c->x86_stepping; ++ ++ if (cpu_has(c, X86_FEATURE_ZEN3) || ++ cpu_has(c, X86_FEATURE_ZEN4)) { ++ switch (p.ucode_rev >> 8) { ++ case 0xa0011: min_rev = 0x0a0011d7; break; ++ case 0xa0012: min_rev = 0x0a00123b; break; ++ case 0xa0082: min_rev = 0x0a00820d; break; ++ case 0xa1011: min_rev = 0x0a10114c; break; ++ case 0xa1012: min_rev = 0x0a10124c; break; ++ case 0xa1081: min_rev = 0x0a108109; break; ++ case 0xa2010: min_rev = 0x0a20102e; break; ++ case 0xa2012: min_rev = 0x0a201211; break; ++ case 0xa4041: min_rev = 0x0a404108; break; ++ case 0xa5000: min_rev = 0x0a500012; break; ++ case 0xa6012: min_rev = 0x0a60120a; break; ++ case 0xa7041: min_rev = 0x0a704108; break; ++ case 0xa7052: min_rev = 0x0a705208; break; ++ case 0xa7080: min_rev = 0x0a708008; break; ++ case 0xa70c0: min_rev = 0x0a70c008; break; ++ case 0xaa002: min_rev = 0x0aa00216; break; ++ default: ++ pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n", ++ __func__, p.ucode_rev, c->microcode); ++ return false; ++ } ++ } ++ ++ if (!min_rev) ++ return false; ++ ++ return c->microcode >= min_rev; ++} ++ ++static void tsa_init(struct cpuinfo_x86 *c) ++{ ++ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) ++ return; ++ ++ if (cpu_has(c, X86_FEATURE_ZEN3) || ++ cpu_has(c, X86_FEATURE_ZEN4)) { ++ if (amd_check_tsa_microcode()) ++ setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR); ++ } else { ++ setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO); ++ setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO); ++ } ++} ++ + static void bsp_init_amd(struct cpuinfo_x86 *c) + { + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { +@@ -645,6 +702,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) + break; + } + ++ ++ tsa_init(c); ++ + return; + + warn: +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 07b45bbf6348de..c4d5ac99c6af84 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -50,6 +50,7 @@ static void __init l1d_flush_select_mitigation(void); + static void __init srso_select_mitigation(void); + static void __init gds_select_mitigation(void); + static void __init its_select_mitigation(void); ++static void __init tsa_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR without task-specific bits set */ + u64 x86_spec_ctrl_base; +@@ -122,9 +123,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + /* Control unconditional IBPB in switch_mm() */ + DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + +-/* Control MDS CPU buffer clear before idling (halt, mwait) */ +-DEFINE_STATIC_KEY_FALSE(mds_idle_clear); +-EXPORT_SYMBOL_GPL(mds_idle_clear); ++/* Control CPU buffer clear before idling (halt, mwait) */ ++DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); ++EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); + + /* + * Controls whether l1d flush based mitigations are enabled, +@@ -185,6 +186,7 @@ void __init cpu_select_mitigations(void) + srso_select_mitigation(); + gds_select_mitigation(); + its_select_mitigation(); ++ tsa_select_mitigation(); + } + + /* +@@ -445,7 +447,7 @@ static 
void __init mmio_select_mitigation(void) + * is required irrespective of SMT state. + */ + if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) +- static_branch_enable(&mds_idle_clear); ++ static_branch_enable(&cpu_buf_idle_clear); + + /* + * Check if the system has the right microcode. +@@ -2082,10 +2084,10 @@ static void update_mds_branch_idle(void) + return; + + if (sched_smt_active()) { +- static_branch_enable(&mds_idle_clear); ++ static_branch_enable(&cpu_buf_idle_clear); + } else if (mmio_mitigation == MMIO_MITIGATION_OFF || + (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { +- static_branch_disable(&mds_idle_clear); ++ static_branch_disable(&cpu_buf_idle_clear); + } + } + +@@ -2093,6 +2095,94 @@ static void update_mds_branch_idle(void) + #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" + #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" + ++#undef pr_fmt ++#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt ++ ++enum tsa_mitigations { ++ TSA_MITIGATION_NONE, ++ TSA_MITIGATION_UCODE_NEEDED, ++ TSA_MITIGATION_USER_KERNEL, ++ TSA_MITIGATION_VM, ++ TSA_MITIGATION_FULL, ++}; ++ ++static const char * const tsa_strings[] = { ++ [TSA_MITIGATION_NONE] = "Vulnerable", ++ [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", ++ [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", ++ [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", ++ [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", ++}; ++ ++static enum tsa_mitigations tsa_mitigation __ro_after_init = ++ IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE; ++ ++static int __init tsa_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "off")) ++ tsa_mitigation = TSA_MITIGATION_NONE; ++ else if (!strcmp(str, "on")) ++ tsa_mitigation = TSA_MITIGATION_FULL; ++ else if (!strcmp(str, "user")) ++ tsa_mitigation = TSA_MITIGATION_USER_KERNEL; ++ else if (!strcmp(str, "vm")) ++ tsa_mitigation = TSA_MITIGATION_VM; ++ else ++ pr_err("Ignoring unknown tsa=%s option.\n", str); ++ ++ return 0; ++} ++early_param("tsa", tsa_parse_cmdline); ++ ++static void __init tsa_select_mitigation(void) ++{ ++ if (tsa_mitigation == TSA_MITIGATION_NONE) ++ return; ++ ++ if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) { ++ tsa_mitigation = TSA_MITIGATION_NONE; ++ return; ++ } ++ ++ if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) ++ tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; ++ ++ switch (tsa_mitigation) { ++ case TSA_MITIGATION_USER_KERNEL: ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); ++ break; ++ ++ case TSA_MITIGATION_VM: ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); ++ break; ++ ++ case TSA_MITIGATION_UCODE_NEEDED: ++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ goto out; ++ ++ pr_notice("Forcing mitigation on in a VM\n"); ++ ++ /* ++ * On the off-chance that microcode has been updated ++ * on the host, enable the mitigation in the guest just ++ * in case. 
++ */ ++ fallthrough; ++ case TSA_MITIGATION_FULL: ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); ++ break; ++ default: ++ break; ++ } ++ ++out: ++ pr_info("%s\n", tsa_strings[tsa_mitigation]); ++} ++ + void cpu_bugs_smt_update(void) + { + mutex_lock(&spec_ctrl_mutex); +@@ -2146,6 +2236,24 @@ void cpu_bugs_smt_update(void) + break; + } + ++ switch (tsa_mitigation) { ++ case TSA_MITIGATION_USER_KERNEL: ++ case TSA_MITIGATION_VM: ++ case TSA_MITIGATION_FULL: ++ case TSA_MITIGATION_UCODE_NEEDED: ++ /* ++ * TSA-SQ can potentially lead to info leakage between ++ * SMT threads. ++ */ ++ if (sched_smt_active()) ++ static_branch_enable(&cpu_buf_idle_clear); ++ else ++ static_branch_disable(&cpu_buf_idle_clear); ++ break; ++ case TSA_MITIGATION_NONE: ++ break; ++ } ++ + mutex_unlock(&spec_ctrl_mutex); + } + +@@ -3075,6 +3183,11 @@ static ssize_t gds_show_state(char *buf) + return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); + } + ++static ssize_t tsa_show_state(char *buf) ++{ ++ return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); ++} ++ + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) + { +@@ -3136,6 +3249,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr + case X86_BUG_ITS: + return its_show_state(buf); + ++ case X86_BUG_TSA: ++ return tsa_show_state(buf); ++ + default: + break; + } +@@ -3220,4 +3336,9 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att + { + return cpu_show_common(dev, attr, buf, X86_BUG_ITS); + } ++ ++ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_TSA); ++} + #endif +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index b6e43dad577a3c..f66c71bffa6d93 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1277,6 +1277,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + #define ITS BIT(8) + /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ + #define ITS_NATIVE_ONLY BIT(9) ++/* CPU is affected by Transient Scheduler Attacks */ ++#define TSA BIT(10) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +@@ -1324,7 +1326,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_AMD(0x16, RETBLEED), + VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), + VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), +- VULNBL_AMD(0x19, SRSO), ++ VULNBL_AMD(0x19, SRSO | TSA), + {} + }; + +@@ -1529,6 +1531,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); + } + ++ if (c->x86_vendor == X86_VENDOR_AMD) { ++ if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) || ++ !cpu_has(c, X86_FEATURE_TSA_L1_NO)) { ++ if (cpu_matches(cpu_vuln_blacklist, TSA) || ++ /* Enable bug on Zen guests to allow for live migration. 
*/ ++ (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN))) ++ setup_force_cpu_bug(X86_BUG_TSA); ++ } ++ } ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +@@ -2215,20 +2227,16 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard); + + #endif /* CONFIG_X86_64 */ + +-/* +- * Clear all 6 debug registers: +- */ +-static void clear_all_debug_regs(void) ++static void initialize_debug_regs(void) + { +- int i; +- +- for (i = 0; i < 8; i++) { +- /* Ignore db4, db5 */ +- if ((i == 4) || (i == 5)) +- continue; +- +- set_debugreg(0, i); +- } ++ /* Control register first -- to make sure everything is disabled. */ ++ set_debugreg(0, 7); ++ set_debugreg(DR6_RESERVED, 6); ++ /* dr5 and dr4 don't exist */ ++ set_debugreg(0, 3); ++ set_debugreg(0, 2); ++ set_debugreg(0, 1); ++ set_debugreg(0, 0); + } + + #ifdef CONFIG_KGDB +@@ -2371,7 +2379,7 @@ void cpu_init(void) + + load_mm_ldt(&init_mm); + +- clear_all_debug_regs(); ++ initialize_debug_regs(); + dbg_restore_debug_regs(); + + doublefault_init_cpu_tss(); +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c +index 9b0570f769eb3d..7444fe0e3d08cd 100644 +--- a/arch/x86/kernel/cpu/microcode/amd.c ++++ b/arch/x86/kernel/cpu/microcode/amd.c +@@ -96,18 +96,6 @@ static struct equiv_cpu_table { + struct equiv_cpu_entry *entry; + } equiv_table; + +-union zen_patch_rev { +- struct { +- __u32 rev : 8, +- stepping : 4, +- model : 4, +- __reserved : 4, +- ext_model : 4, +- ext_fam : 8; +- }; +- __u32 ucode_rev; +-}; +- + union cpuid_1_eax { + struct { + __u32 stepping : 4, +diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c +index 2a1655b1fdd883..1fd349cfc8024a 100644 +--- a/arch/x86/kernel/cpu/microcode/amd_shas.c ++++ b/arch/x86/kernel/cpu/microcode/amd_shas.c +@@ -231,6 +231,13 @@ static const struct patch_digest phashes[] = { + 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, + } + }, ++ { 0xa0011d7, { ++ 0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b, ++ 0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12, ++ 0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2, ++ 0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36, ++ } ++ }, + { 0xa001223, { + 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, + 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, +@@ -294,6 +301,13 @@ static const struct patch_digest phashes[] = { + 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, + } + }, ++ { 0xa00123b, { ++ 0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2, ++ 0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3, ++ 0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28, ++ 0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72, ++ } ++ }, + { 0xa00820c, { + 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, + 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, +@@ -301,6 +315,13 @@ static const struct patch_digest phashes[] = { + 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, + } + }, ++ { 0xa00820d, { ++ 0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4, ++ 0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca, ++ 0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1, ++ 0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7, ++ } ++ }, + { 0xa10113e, { + 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, + 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, +@@ -322,6 +343,13 @@ static const struct patch_digest phashes[] = { + 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, + } + }, ++ { 0xa10114c, { ++ 0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64, ++ 0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74, ++ 0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a, ++ 0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0, ++ } ++ }, + { 0xa10123e, { + 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, + 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, +@@ -343,6 +371,13 
@@ static const struct patch_digest phashes[] = { + 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, + } + }, ++ { 0xa10124c, { ++ 0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90, ++ 0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46, ++ 0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc, ++ 0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2, ++ } ++ }, + { 0xa108108, { + 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, + 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, +@@ -350,6 +385,13 @@ static const struct patch_digest phashes[] = { + 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, + } + }, ++ { 0xa108109, { ++ 0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa, ++ 0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b, ++ 0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35, ++ 0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59, ++ } ++ }, + { 0xa20102d, { + 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, + 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, +@@ -357,6 +399,13 @@ static const struct patch_digest phashes[] = { + 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, + } + }, ++ { 0xa20102e, { ++ 0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd, ++ 0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6, ++ 0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5, ++ 0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe, ++ } ++ }, + { 0xa201210, { + 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, + 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, +@@ -364,6 +413,13 @@ static const struct patch_digest phashes[] = { + 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, + } + }, ++ { 0xa201211, { ++ 0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95, ++ 0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27, ++ 0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff, ++ 0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27, ++ } ++ }, + { 0xa404107, { + 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, + 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, +@@ -371,6 +427,13 @@ static const struct patch_digest phashes[] = { + 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, + } + }, ++ { 0xa404108, { ++ 0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc, ++ 0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb, ++ 0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19, ++ 0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00, ++ } ++ }, + { 0xa500011, { + 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, + 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, +@@ -378,6 +441,13 @@ static const struct patch_digest phashes[] = { + 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, + } + }, ++ { 0xa500012, { ++ 0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4, ++ 0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16, ++ 0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f, ++ 0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63, ++ } ++ }, + { 0xa601209, { + 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, + 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, +@@ -385,6 +455,13 @@ static const struct patch_digest phashes[] = { + 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, + } + }, ++ { 0xa60120a, { ++ 0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d, ++ 0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b, ++ 0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d, ++ 0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c, ++ } ++ }, + { 0xa704107, { + 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, + 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, +@@ -392,6 +469,13 @@ static const struct patch_digest phashes[] = { + 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, + } + }, ++ { 0xa704108, { ++ 0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93, ++ 0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98, ++ 0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49, ++ 0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a, ++ } ++ }, + { 0xa705206, { + 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, + 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, +@@ -399,6 +483,13 @@ static const struct patch_digest phashes[] = { + 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, + } + }, ++ { 0xa705208, { ++ 
0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19, ++ 0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2, ++ 0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11, ++ 0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff, ++ } ++ }, + { 0xa708007, { + 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, + 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, +@@ -406,6 +497,13 @@ static const struct patch_digest phashes[] = { + 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, + } + }, ++ { 0xa708008, { ++ 0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46, ++ 0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab, ++ 0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16, ++ 0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b, ++ } ++ }, + { 0xa70c005, { + 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, + 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, +@@ -413,6 +511,13 @@ static const struct patch_digest phashes[] = { + 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, + } + }, ++ { 0xa70c008, { ++ 0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21, ++ 0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e, ++ 0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66, ++ 0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0, ++ } ++ }, + { 0xaa00116, { + 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, + 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, +@@ -441,4 +546,11 @@ static const struct patch_digest phashes[] = { + 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, + } + }, ++ { 0xaa00216, { ++ 0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5, ++ 0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08, ++ 0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2, ++ 0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1, ++ } ++ }, + }; +diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c +index af5aa2c754c222..7a42e699f6e39a 100644 +--- a/arch/x86/kernel/cpu/scattered.c ++++ b/arch/x86/kernel/cpu/scattered.c +@@ -48,6 +48,8 @@ static const struct cpuid_bit cpuid_bits[] = { + { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, + { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, + { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, ++ { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 }, ++ { X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 }, + { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, + { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, + { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 }, +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index 33c235e9d0d3fb..e3c26cc45f7008 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -928,16 +928,24 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) + */ + static __cpuidle void mwait_idle(void) + { ++ if (need_resched()) ++ return; ++ ++ x86_idle_clear_cpu_buffers(); ++ + if (!current_set_polling_and_test()) { + const void *addr = ¤t_thread_info()->flags; + + alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); + __monitor(addr, 0, 0); +- if (!need_resched()) { +- __sti_mwait(0, 0); +- raw_local_irq_disable(); +- } ++ if (need_resched()) ++ goto out; ++ ++ __sti_mwait(0, 0); ++ raw_local_irq_disable(); + } ++ ++out: + __current_clr_polling(); + } + +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 8718d58dd0fbea..a52db362a65d16 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -975,24 +975,32 @@ static bool is_sysenter_singlestep(struct pt_regs *regs) + #endif + } + +-static __always_inline unsigned long debug_read_clear_dr6(void) ++static __always_inline unsigned long debug_read_reset_dr6(void) + { + unsigned long dr6; + ++ get_debugreg(dr6, 6); ++ dr6 ^= DR6_RESERVED; /* Flip to positive polarity */ ++ + /* + * The Intel SDM 
says: + * +- * Certain debug exceptions may clear bits 0-3. The remaining +- * contents of the DR6 register are never cleared by the +- * processor. To avoid confusion in identifying debug +- * exceptions, debug handlers should clear the register before +- * returning to the interrupted task. ++ * Certain debug exceptions may clear bits 0-3 of DR6. ++ * ++ * BLD induced #DB clears DR6.BLD and any other debug ++ * exception doesn't modify DR6.BLD. + * +- * Keep it simple: clear DR6 immediately. ++ * RTM induced #DB clears DR6.RTM and any other debug ++ * exception sets DR6.RTM. ++ * ++ * To avoid confusion in identifying debug exceptions, ++ * debug handlers should set DR6.BLD and DR6.RTM, and ++ * clear other DR6 bits before returning. ++ * ++ * Keep it simple: write DR6 with its architectural reset ++ * value 0xFFFF0FF0, defined as DR6_RESERVED, immediately. + */ +- get_debugreg(dr6, 6); + set_debugreg(DR6_RESERVED, 6); +- dr6 ^= DR6_RESERVED; /* Flip to positive polarity */ + + return dr6; + } +@@ -1188,19 +1196,19 @@ static __always_inline void exc_debug_user(struct pt_regs *regs, + /* IST stack entry */ + DEFINE_IDTENTRY_DEBUG(exc_debug) + { +- exc_debug_kernel(regs, debug_read_clear_dr6()); ++ exc_debug_kernel(regs, debug_read_reset_dr6()); + } + + /* User entry, runs on regular task stack */ + DEFINE_IDTENTRY_DEBUG_USER(exc_debug) + { +- exc_debug_user(regs, debug_read_clear_dr6()); ++ exc_debug_user(regs, debug_read_reset_dr6()); + } + #else + /* 32 bit does not have separate entry points. */ + DEFINE_IDTENTRY_RAW(exc_debug) + { +- unsigned long dr6 = debug_read_clear_dr6(); ++ unsigned long dr6 = debug_read_reset_dr6(); + + if (user_mode(regs)) + exc_debug_user(regs, dr6); +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index a6cffeff75d40b..288db351677222 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -780,6 +780,7 @@ void kvm_set_cpu_caps(void) + + kvm_cpu_cap_mask(CPUID_8000_0021_EAX, + F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ | ++ F(VERW_CLEAR) | + F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ + ); + +@@ -790,6 +791,10 @@ void kvm_set_cpu_caps(void) + F(PERFMON_V2) + ); + ++ kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX, ++ F(TSA_SQ_NO) | F(TSA_L1_NO) ++ ); ++ + /* + * Synthesize "LFENCE is serializing" into the AMD-defined entry in + * KVM's supported CPUID if the feature is reported as supported by the +@@ -1296,8 +1301,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + case 0x80000021: +- entry->ebx = entry->ecx = entry->edx = 0; ++ entry->ebx = entry->edx = 0; + cpuid_entry_override(entry, CPUID_8000_0021_EAX); ++ cpuid_entry_override(entry, CPUID_8000_0021_ECX); + break; + /* AMD Extended Performance Monitoring and Debug */ + case 0x80000022: { +diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h +index 2f4e155080badc..be23712354bd8e 100644 +--- a/arch/x86/kvm/reverse_cpuid.h ++++ b/arch/x86/kvm/reverse_cpuid.h +@@ -17,6 +17,7 @@ enum kvm_only_cpuid_leafs { + CPUID_8000_0007_EDX, + CPUID_8000_0022_EAX, + CPUID_7_2_EDX, ++ CPUID_8000_0021_ECX, + NR_KVM_CPU_CAPS, + + NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, +@@ -61,6 +62,10 @@ enum kvm_only_cpuid_leafs { + /* CPUID level 0x80000022 (EAX) */ + #define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0) + ++/* CPUID level 0x80000021 (ECX) */ ++#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1) 
++#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2) ++ + struct cpuid_reg { + u32 function; + u32 index; +@@ -90,6 +95,7 @@ static const struct cpuid_reg reverse_cpuid[] = { + [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, + [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, + [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, ++ [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX}, + }; + + /* +@@ -129,6 +135,8 @@ static __always_inline u32 __feature_translate(int x86_feature) + KVM_X86_TRANSLATE_FEATURE(PERFMON_V2); + KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); + KVM_X86_TRANSLATE_FEATURE(BHI_CTRL); ++ KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO); ++ KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO); + default: + return x86_feature; + } +diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S +index ef2ebabb059c8c..56fe34d9397f64 100644 +--- a/arch/x86/kvm/svm/vmenter.S ++++ b/arch/x86/kvm/svm/vmenter.S +@@ -167,6 +167,9 @@ SYM_FUNC_START(__svm_vcpu_run) + #endif + mov VCPU_RDI(%_ASM_DI), %_ASM_DI + ++ /* Clobbers EFLAGS.ZF */ ++ VM_CLEAR_CPU_BUFFERS ++ + /* Enter guest mode */ + sti + +@@ -334,6 +337,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) + mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX + mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX + ++ /* Clobbers EFLAGS.ZF */ ++ VM_CLEAR_CPU_BUFFERS ++ + /* Enter guest mode */ + sti + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index e7f3b70f9114ae..e53620e189254b 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -7263,7 +7263,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, + vmx_l1d_flush(vcpu); + else if (static_branch_unlikely(&mmio_stale_data_clear) && + kvm_arch_has_assigned_device(vcpu->kvm)) +- mds_clear_cpu_buffers(); ++ x86_clear_cpu_buffers(); + + vmx_disable_fb_clear(vmx); + +diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c +index e809c2aed78aed..a232746d150a75 100644 +--- a/drivers/acpi/acpica/dsmethod.c ++++ b/drivers/acpi/acpica/dsmethod.c +@@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, + return_ACPI_STATUS(AE_NULL_OBJECT); + } + ++ if (this_walk_state->num_operands < obj_desc->method.param_count) { ++ ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]", ++ acpi_ut_get_node_name(method_node))); ++ ++ return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG); ++ } ++ + /* Init for new method, possibly wait on method mutex */ + + status = +diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c +index d36e71f475abdc..39a350755a1baf 100644 +--- a/drivers/ata/libata-acpi.c ++++ b/drivers/ata/libata-acpi.c +@@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, + EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask); + + /** +- * ata_acpi_cbl_80wire - Check for 80 wire cable ++ * ata_acpi_cbl_pata_type - Return PATA cable type + * @ap: Port to check +- * @gtm: GTM data to use + * +- * Return 1 if the @gtm indicates the BIOS selected an 80wire mode. 
++ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS + */ +-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) ++int ata_acpi_cbl_pata_type(struct ata_port *ap) + { + struct ata_device *dev; ++ int ret = ATA_CBL_PATA_UNK; ++ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); ++ ++ if (!gtm) ++ return ATA_CBL_PATA40; + + ata_for_each_dev(dev, &ap->link, ENABLED) { + unsigned int xfer_mask, udma_mask; +@@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) + xfer_mask = ata_acpi_gtm_xfermask(dev, gtm); + ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask); + +- if (udma_mask & ~ATA_UDMA_MASK_40C) +- return 1; ++ ret = ATA_CBL_PATA40; ++ ++ if (udma_mask & ~ATA_UDMA_MASK_40C) { ++ ret = ATA_CBL_PATA80; ++ break; ++ } + } + +- return 0; ++ return ret; + } +-EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire); ++EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type); + + static void ata_acpi_gtf_to_tf(struct ata_device *dev, + const struct ata_acpi_gtf *gtf, +diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c +index b811efd2cc346a..73e81e160c91fb 100644 +--- a/drivers/ata/pata_cs5536.c ++++ b/drivers/ata/pata_cs5536.c +@@ -27,7 +27,7 @@ + #include + #include + +-#ifdef CONFIG_X86_32 ++#if defined(CONFIG_X86) && defined(CONFIG_X86_32) + #include + static int use_msr; + module_param_named(msr, use_msr, int, 0644); +diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c +index d82728a01832b5..bb80e7800dcbe9 100644 +--- a/drivers/ata/pata_via.c ++++ b/drivers/ata/pata_via.c +@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) { + two drives */ + if (ata66 & (0x10100000 >> (16 * ap->port_no))) + return ATA_CBL_PATA80; ++ + /* Check with ACPI so we can spot BIOS reported SATA bridges */ +- if (ata_acpi_init_gtm(ap) && +- ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap))) +- return ATA_CBL_PATA80; +- return ATA_CBL_PATA40; ++ return ata_acpi_cbl_pata_type(ap); + } + + static int via_pre_reset(struct ata_link *link, unsigned long deadline) +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index a5cfc1bfad51fb..a3aea3c1431aa9 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -567,6 +567,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); + CPU_SHOW_VULN_FALLBACK(gds); + CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); + CPU_SHOW_VULN_FALLBACK(indirect_target_selection); ++CPU_SHOW_VULN_FALLBACK(tsa); + + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); +@@ -583,6 +584,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU + static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); + static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); + static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); ++static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -600,6 +602,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_gather_data_sampling.attr, + &dev_attr_reg_file_data_sampling.attr, + &dev_attr_indirect_target_selection.attr, ++ &dev_attr_tsa.attr, + NULL + }; + +diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h +index 749ae1246f4cf8..d35caa3c69e15e 100644 +--- a/drivers/block/aoe/aoe.h ++++ b/drivers/block/aoe/aoe.h +@@ -80,6 +80,7 @@ enum { + DEVFL_NEWSIZE = 
(1<<6), /* need to update dev size in block layer */ + DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */ + DEVFL_FREED = (1<<8), /* device has been cleaned up */ ++ DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */ + }; + + enum { +diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c +index d1f4ddc576451a..c4c5cf1ec71ba9 100644 +--- a/drivers/block/aoe/aoecmd.c ++++ b/drivers/block/aoe/aoecmd.c +@@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer) + + utgts = count_targets(d, NULL); + +- if (d->flags & DEVFL_TKILL) { ++ if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) { + spin_unlock_irqrestore(&d->lock, flags); + return; + } +@@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer) + * to clean up. + */ + list_splice(&flist, &d->factive[0]); +- aoedev_downdev(d); ++ d->flags |= DEVFL_DEAD; ++ queue_work(aoe_wq, &d->work); + goto out; + } + +@@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work) + { + struct aoedev *d = container_of(work, struct aoedev, work); + ++ if (d->flags & DEVFL_DEAD) ++ aoedev_downdev(d); ++ + if (d->flags & DEVFL_GDALLOC) + aoeblk_gdalloc(d); + +diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c +index 280679bde3a506..4240e11adfb769 100644 +--- a/drivers/block/aoe/aoedev.c ++++ b/drivers/block/aoe/aoedev.c +@@ -200,8 +200,11 @@ aoedev_downdev(struct aoedev *d) + struct list_head *head, *pos, *nx; + struct request *rq, *rqnext; + int i; ++ unsigned long flags; + +- d->flags &= ~DEVFL_UP; ++ spin_lock_irqsave(&d->lock, flags); ++ d->flags &= ~(DEVFL_UP | DEVFL_DEAD); ++ spin_unlock_irqrestore(&d->lock, flags); + + /* clean out active and to-be-retransmitted buffers */ + for (i = 0; i < NFACTIVE; i++) { +diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c +index 9093f751f1336a..8f3fa149a76d9b 100644 +--- a/drivers/dma-buf/dma-resv.c ++++ b/drivers/dma-buf/dma-resv.c +@@ -678,11 +678,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, + dma_resv_iter_begin(&cursor, obj, usage); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + +- ret = dma_fence_wait_timeout(fence, intr, ret); +- if (ret <= 0) { +- dma_resv_iter_end(&cursor); +- return ret; +- } ++ ret = dma_fence_wait_timeout(fence, intr, timeout); ++ if (ret <= 0) ++ break; ++ ++ /* Even for zero timeout the return value is 1 */ ++ if (timeout) ++ timeout = ret; + } + dma_resv_iter_end(&cursor); + +diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c +index 5bdc246f5fad09..341e95269836e0 100644 +--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c ++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c +@@ -187,6 +187,7 @@ struct fimd_context { + u32 i80ifcon; + bool i80_if; + bool suspended; ++ bool dp_clk_enabled; + wait_queue_head_t wait_vsync_queue; + atomic_t wait_vsync_event; + atomic_t win_updated; +@@ -1047,7 +1048,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable) + struct fimd_context *ctx = container_of(clk, struct fimd_context, + dp_clk); + u32 val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; ++ ++ if (enable == ctx->dp_clk_enabled) ++ return; ++ ++ if (enable) ++ pm_runtime_resume_and_get(ctx->dev); ++ ++ ctx->dp_clk_enabled = enable; + writel(val, ctx->regs + DP_MIE_CLKCON); ++ ++ if (!enable) ++ pm_runtime_put(ctx->dev); + } + + static const struct exynos_drm_crtc_ops fimd_crtc_ops = { +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +index 023b2ea74c3601..5a687a3686bd53 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +@@ -2013,7 +2013,7 @@ static int eb_capture_stage(struct i915_execbuffer *eb) + continue; + + if (i915_gem_context_is_recoverable(eb->gem_context) && +- GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 10)) ++ (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0))) + return -EINVAL; + + for_each_batch_create_order(eb, j) { +diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c +index bcc3605158dbde..27420ed631d850 100644 +--- a/drivers/gpu/drm/i915/gt/intel_gsc.c ++++ b/drivers/gpu/drm/i915/gt/intel_gsc.c +@@ -298,7 +298,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) + if (gt->gsc.intf[intf_id].irq < 0) + return; + +- ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); ++ ret = generic_handle_irq_safe(gt->gsc.intf[intf_id].irq); + if (ret) + drm_err_ratelimited(>->i915->drm, "error handling GSC irq: %d\n", ret); + } +diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c +index 92085ffd23de0e..4eb78895773f6f 100644 +--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c ++++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c +@@ -573,7 +573,6 @@ static int ring_context_alloc(struct intel_context *ce) + /* One ringbuffer to rule them all */ + GEM_BUG_ON(!engine->legacy.ring); + ce->ring = engine->legacy.ring; +- ce->timeline = intel_timeline_get(engine->legacy.timeline); + + GEM_BUG_ON(ce->state); + if (engine->context_size) { +@@ -586,6 +585,8 @@ static int ring_context_alloc(struct intel_context *ce) + ce->state = vma; + } + ++ ce->timeline = intel_timeline_get(engine->legacy.timeline); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c +index a9b79888c19316..c7ce2c570ad1f8 100644 +--- a/drivers/gpu/drm/i915/selftests/i915_request.c ++++ b/drivers/gpu/drm/i915/selftests/i915_request.c +@@ -73,8 +73,8 @@ static int igt_add_request(void *arg) + /* Basic preliminary test to create a request and let it loose! 
*/ + + request = mock_request(rcs0(i915)->kernel_context, HZ / 10); +- if (!request) +- return -ENOMEM; ++ if (IS_ERR(request)) ++ return PTR_ERR(request); + + i915_request_add(request); + +@@ -91,8 +91,8 @@ static int igt_wait_request(void *arg) + /* Submit a request, then wait upon it */ + + request = mock_request(rcs0(i915)->kernel_context, T); +- if (!request) +- return -ENOMEM; ++ if (IS_ERR(request)) ++ return PTR_ERR(request); + + i915_request_get(request); + +@@ -160,8 +160,8 @@ static int igt_fence_wait(void *arg) + /* Submit a request, treat it as a fence and wait upon it */ + + request = mock_request(rcs0(i915)->kernel_context, T); +- if (!request) +- return -ENOMEM; ++ if (IS_ERR(request)) ++ return PTR_ERR(request); + + if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { + pr_err("fence wait success before submit (expected timeout)!\n"); +@@ -219,8 +219,8 @@ static int igt_request_rewind(void *arg) + GEM_BUG_ON(IS_ERR(ce)); + request = mock_request(ce, 2 * HZ); + intel_context_put(ce); +- if (!request) { +- err = -ENOMEM; ++ if (IS_ERR(request)) { ++ err = PTR_ERR(request); + goto err_context_0; + } + +@@ -237,8 +237,8 @@ static int igt_request_rewind(void *arg) + GEM_BUG_ON(IS_ERR(ce)); + vip = mock_request(ce, 0); + intel_context_put(ce); +- if (!vip) { +- err = -ENOMEM; ++ if (IS_ERR(vip)) { ++ err = PTR_ERR(vip); + goto err_context_1; + } + +diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c +index 09f747228dff57..1b0cf073e9643f 100644 +--- a/drivers/gpu/drm/i915/selftests/mock_request.c ++++ b/drivers/gpu/drm/i915/selftests/mock_request.c +@@ -35,7 +35,7 @@ mock_request(struct intel_context *ce, unsigned long delay) + /* NB the i915->requests slab cache is enlarged to fit mock_request */ + request = intel_context_create_request(ce); + if (IS_ERR(request)) +- return NULL; ++ return request; + + request->mock.delay = delay; + return request; +diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c +index 018b39546fc1dd..bbe4f1665b6039 100644 +--- a/drivers/gpu/drm/msm/msm_gem_submit.c ++++ b/drivers/gpu/drm/msm/msm_gem_submit.c +@@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref) + container_of(kref, struct msm_gem_submit, ref); + unsigned i; + ++ /* ++ * In error paths, we could unref the submit without calling ++ * drm_sched_entity_push_job(), so msm_job_free() will never ++ * get called. Since drm_sched_job_cleanup() will NULL out ++ * s_fence, we can use that to detect this case. 
++ */ ++ if (submit->base.s_fence) ++ drm_sched_job_cleanup(&submit->base); ++ + if (submit->fence_id) { + spin_lock(&submit->queue->idr_lock); + idr_remove(&submit->queue->fence_idr, submit->fence_id); +@@ -754,6 +763,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, + struct msm_ringbuffer *ring; + struct msm_submit_post_dep *post_deps = NULL; + struct drm_syncobj **syncobjs_to_reset = NULL; ++ struct sync_file *sync_file = NULL; + int out_fence_fd = -1; + bool has_ww_ticket = false; + unsigned i; +@@ -970,7 +980,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, + } + + if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { +- struct sync_file *sync_file = sync_file_create(submit->user_fence); ++ sync_file = sync_file_create(submit->user_fence); + if (!sync_file) { + ret = -ENOMEM; + } else { +@@ -1003,8 +1013,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, + out_unlock: + mutex_unlock(&queue->lock); + out_post_unlock: +- if (ret && (out_fence_fd >= 0)) ++ if (ret && (out_fence_fd >= 0)) { + put_unused_fd(out_fence_fd); ++ if (sync_file) ++ fput(sync_file->file); ++ } + + if (!IS_ERR_OR_NULL(submit)) { + msm_gem_submit_put(submit); +diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c +index 8ea120eb8674bd..30676b1073034e 100644 +--- a/drivers/gpu/drm/tiny/simpledrm.c ++++ b/drivers/gpu/drm/tiny/simpledrm.c +@@ -276,7 +276,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev) + + static void simpledrm_device_release_clocks(void *res) + { +- struct simpledrm_device *sdev = simpledrm_device_of_dev(res); ++ struct simpledrm_device *sdev = res; + unsigned int i; + + for (i = 0; i < sdev->clk_count; ++i) { +@@ -374,7 +374,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev) + + static void simpledrm_device_release_regulators(void *res) + { +- struct simpledrm_device *sdev = simpledrm_device_of_dev(res); ++ struct simpledrm_device *sdev = res; + unsigned int i; + + for (i = 0; i < sdev->regulator_count; ++i) { +diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h +index 7f664a4b2a7521..bcef978ba9c4ca 100644 +--- a/drivers/gpu/drm/v3d/v3d_drv.h ++++ b/drivers/gpu/drm/v3d/v3d_drv.h +@@ -62,6 +62,12 @@ struct v3d_perfmon { + u64 values[]; + }; + ++enum v3d_irq { ++ V3D_CORE_IRQ, ++ V3D_HUB_IRQ, ++ V3D_MAX_IRQS, ++}; ++ + struct v3d_dev { + struct drm_device drm; + +@@ -71,6 +77,8 @@ struct v3d_dev { + int ver; + bool single_irq_line; + ++ int irq[V3D_MAX_IRQS]; ++ + void __iomem *hub_regs; + void __iomem *core_regs[3]; + void __iomem *bridge_regs; +diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c +index 2e94ce788c714b..ef991a9b1c6c46 100644 +--- a/drivers/gpu/drm/v3d/v3d_gem.c ++++ b/drivers/gpu/drm/v3d/v3d_gem.c +@@ -120,6 +120,8 @@ v3d_reset(struct v3d_dev *v3d) + if (false) + v3d_idle_axi(v3d, 0); + ++ v3d_irq_disable(v3d); ++ + v3d_idle_gca(v3d); + v3d_reset_v3d(v3d); + +diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c +index b2d59a16869728..641315dbee8b29 100644 +--- a/drivers/gpu/drm/v3d/v3d_irq.c ++++ b/drivers/gpu/drm/v3d/v3d_irq.c +@@ -215,7 +215,7 @@ v3d_hub_irq(int irq, void *arg) + int + v3d_irq_init(struct v3d_dev *v3d) + { +- int irq1, ret, core; ++ int irq, ret, core; + + INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); + +@@ -226,17 +226,24 @@ v3d_irq_init(struct v3d_dev *v3d) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); + V3D_WRITE(V3D_HUB_INT_CLR, 
V3D_HUB_IRQS); + +- irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); +- if (irq1 == -EPROBE_DEFER) +- return irq1; +- if (irq1 > 0) { +- ret = devm_request_irq(v3d->drm.dev, irq1, ++ irq = platform_get_irq_optional(v3d_to_pdev(v3d), 1); ++ if (irq == -EPROBE_DEFER) ++ return irq; ++ if (irq > 0) { ++ v3d->irq[V3D_CORE_IRQ] = irq; ++ ++ ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], + v3d_irq, IRQF_SHARED, + "v3d_core0", v3d); + if (ret) + goto fail; +- ret = devm_request_irq(v3d->drm.dev, +- platform_get_irq(v3d_to_pdev(v3d), 0), ++ ++ irq = platform_get_irq(v3d_to_pdev(v3d), 0); ++ if (irq < 0) ++ return irq; ++ v3d->irq[V3D_HUB_IRQ] = irq; ++ ++ ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ], + v3d_hub_irq, IRQF_SHARED, + "v3d_hub", v3d); + if (ret) +@@ -244,8 +251,12 @@ v3d_irq_init(struct v3d_dev *v3d) + } else { + v3d->single_irq_line = true; + +- ret = devm_request_irq(v3d->drm.dev, +- platform_get_irq(v3d_to_pdev(v3d), 0), ++ irq = platform_get_irq(v3d_to_pdev(v3d), 0); ++ if (irq < 0) ++ return irq; ++ v3d->irq[V3D_CORE_IRQ] = irq; ++ ++ ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], + v3d_irq, IRQF_SHARED, + "v3d", v3d); + if (ret) +@@ -286,6 +297,12 @@ v3d_irq_disable(struct v3d_dev *v3d) + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); + ++ /* Finish any interrupt handler still in flight. */ ++ for (int i = 0; i < V3D_MAX_IRQS; i++) { ++ if (v3d->irq[i]) ++ synchronize_irq(v3d->irq[i]); ++ } ++ + /* Clear any pending interrupts we might have left. */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); +diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c +index 51f5491648c077..e865869ccc50ee 100644 +--- a/drivers/i2c/busses/i2c-designware-master.c ++++ b/drivers/i2c/busses/i2c-designware-master.c +@@ -327,6 +327,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, + + dev->msgs = msgs; + dev->msgs_num = num_msgs; ++ dev->msg_write_idx = 0; + i2c_dw_xfer_init(dev); + regmap_write(dev->map, DW_IC_INTR_MASK, 0); + +diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c +index b049bba2157905..d06128501ce4e7 100644 +--- a/drivers/infiniband/hw/mlx5/counters.c ++++ b/drivers/infiniband/hw/mlx5/counters.c +@@ -387,7 +387,7 @@ static int do_get_hw_stats(struct ib_device *ibdev, + return ret; + + /* We don't expose device counters over Vports */ +- if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0) ++ if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0) + goto done; + + if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { +@@ -407,7 +407,7 @@ static int do_get_hw_stats(struct ib_device *ibdev, + */ + goto done; + } +- ret = mlx5_lag_query_cong_counters(dev->mdev, ++ ret = mlx5_lag_query_cong_counters(mdev, + stats->value + + cnts->num_q_counters, + cnts->num_cong_counters, +diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c +index 6e19974ecf6e71..3f1fa45d936821 100644 +--- a/drivers/infiniband/hw/mlx5/devx.c ++++ b/drivers/infiniband/hw/mlx5/devx.c +@@ -1914,6 +1914,7 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, + /* Level1 is valid for future use, no need to free */ + return -ENOMEM; + ++ INIT_LIST_HEAD(&obj_event->obj_sub_list); + err = xa_insert(&event->object_ids, + key_level2, + obj_event, +@@ -1922,7 +1923,6 @@ 
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, + kfree(obj_event); + return err; + } +- INIT_LIST_HEAD(&obj_event->obj_sub_list); + } + + return 0; +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c +index ada7dbf8eb1cf5..e922fb87286547 100644 +--- a/drivers/infiniband/hw/mlx5/main.c ++++ b/drivers/infiniband/hw/mlx5/main.c +@@ -1690,6 +1690,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev, + context->devx_uid); + } + ++static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, ++ struct mlx5_core_dev *slave) ++{ ++ int err; ++ ++ err = mlx5_nic_vport_update_local_lb(master, true); ++ if (err) ++ return err; ++ ++ err = mlx5_nic_vport_update_local_lb(slave, true); ++ if (err) ++ goto out; ++ ++ return 0; ++ ++out: ++ mlx5_nic_vport_update_local_lb(master, false); ++ return err; ++} ++ ++static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master, ++ struct mlx5_core_dev *slave) ++{ ++ mlx5_nic_vport_update_local_lb(slave, false); ++ mlx5_nic_vport_update_local_lb(master, false); ++} ++ + int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) + { + int err = 0; +@@ -3224,6 +3251,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, + + lockdep_assert_held(&mlx5_ib_multiport_mutex); + ++ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev); ++ + mlx5_core_mp_event_replay(ibdev->mdev, + MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, + NULL); +@@ -3319,6 +3348,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, + MLX5_DRIVER_EVENT_AFFILIATION_DONE, + &key); + ++ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev); ++ if (err) ++ goto unbind; ++ + return true; + + unbind: +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index e6fed973ea7411..05c00421ff2b7e 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -174,6 +174,7 @@ static const struct xpad_device { + { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, + { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX }, + { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX }, ++ { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX }, + { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX }, + { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX }, + { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, +@@ -514,6 +515,7 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ + XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ + XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */ ++ XPAD_XBOX360_VENDOR(0x0502), /* Acer Inc. 
Xbox 360 style controllers */ + XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */ + XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */ + XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz Xbox 360 controllers */ +diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c +index b98529568eeb83..ce7e977cc8a7a1 100644 +--- a/drivers/input/misc/iqs7222.c ++++ b/drivers/input/misc/iqs7222.c +@@ -301,6 +301,7 @@ struct iqs7222_dev_desc { + int allow_offset; + int event_offset; + int comms_offset; ++ int ext_chan; + bool legacy_gesture; + struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS]; + }; +@@ -315,6 +316,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { + .allow_offset = 9, + .event_offset = 10, + .comms_offset = 12, ++ .ext_chan = 10, + .reg_grps = { + [IQS7222_REG_GRP_STAT] = { + .base = IQS7222_SYS_STATUS, +@@ -373,6 +375,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { + .allow_offset = 9, + .event_offset = 10, + .comms_offset = 12, ++ .ext_chan = 10, + .legacy_gesture = true, + .reg_grps = { + [IQS7222_REG_GRP_STAT] = { +@@ -2244,7 +2247,7 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + struct i2c_client *client = iqs7222->client; + int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; +- int ext_chan = rounddown(num_chan, 10); ++ int ext_chan = dev_desc->ext_chan ? : num_chan; + int error, i; + u16 *chan_setup = iqs7222->chan_setup[chan_index]; + u16 *sys_setup = iqs7222->sys_setup; +@@ -2448,7 +2451,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + struct i2c_client *client = iqs7222->client; + int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; +- int ext_chan = rounddown(num_chan, 10); ++ int ext_chan = dev_desc->ext_chan ? 
: num_chan; + int count, error, reg_offset, i; + u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset]; + u16 *sldr_setup = iqs7222->sldr_setup[sldr_index]; +diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c +index 8ff69fbf9f65db..36fec26d2a04ac 100644 +--- a/drivers/iommu/rockchip-iommu.c ++++ b/drivers/iommu/rockchip-iommu.c +@@ -1177,7 +1177,6 @@ static int rk_iommu_of_xlate(struct device *dev, + iommu_dev = of_find_device_by_node(args->np); + + data->iommu = platform_get_drvdata(iommu_dev); +- data->iommu->domain = &rk_identity_domain; + dev_iommu_priv_set(dev, data); + + platform_device_put(iommu_dev); +@@ -1217,6 +1216,8 @@ static int rk_iommu_probe(struct platform_device *pdev) + if (!iommu) + return -ENOMEM; + ++ iommu->domain = &rk_identity_domain; ++ + platform_set_drvdata(pdev, iommu); + iommu->dev = dev; + iommu->num_mmu = 0; +diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h +index 7f893bafaa607d..c417ed34c05767 100644 +--- a/drivers/mmc/core/quirks.h ++++ b/drivers/mmc/core/quirks.h +@@ -44,6 +44,12 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = { + 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, + MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY), + ++ /* ++ * Some SD cards reports discard support while they don't ++ */ ++ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd, ++ MMC_QUIRK_BROKEN_SD_DISCARD), ++ + END_FIXUP + }; + +@@ -147,12 +153,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { + MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc, + MMC_QUIRK_TRIM_BROKEN), + +- /* +- * Some SD cards reports discard support while they don't +- */ +- MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd, +- MMC_QUIRK_BROKEN_SD_DISCARD), +- + END_FIXUP + }; + +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c +index 02f3748e46c144..cf685c0a17edc0 100644 +--- a/drivers/mmc/host/mtk-sd.c ++++ b/drivers/mmc/host/mtk-sd.c +@@ -770,12 +770,18 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, + static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data) + { + if (!(data->host_cookie & MSDC_PREPARE_FLAG)) { +- data->host_cookie |= MSDC_PREPARE_FLAG; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); ++ if (data->sg_count) ++ data->host_cookie |= MSDC_PREPARE_FLAG; + } + } + ++static bool msdc_data_prepared(struct mmc_data *data) ++{ ++ return data->host_cookie & MSDC_PREPARE_FLAG; ++} ++ + static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data) + { + if (data->host_cookie & MSDC_ASYNC_FLAG) +@@ -1338,8 +1344,19 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) + WARN_ON(host->mrq); + host->mrq = mrq; + +- if (mrq->data) ++ if (mrq->data) { + msdc_prepare_data(host, mrq->data); ++ if (!msdc_data_prepared(mrq->data)) { ++ host->mrq = NULL; ++ /* ++ * Failed to prepare DMA area, fail fast before ++ * starting any commands. ++ */ ++ mrq->cmd->error = -ENOSPC; ++ mmc_request_done(mmc_from_priv(host), mrq); ++ return; ++ } ++ } + + /* if SBC is required, we have HW option and SW option. 
+ * if HW option is enabled, and SBC does not have "special" flags, +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index f32429ff905ff6..9796a3cb3ca62c 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -2035,15 +2035,10 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) + + host->mmc->actual_clock = 0; + +- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); +- if (clk & SDHCI_CLOCK_CARD_EN) +- sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, +- SDHCI_CLOCK_CONTROL); ++ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + +- if (clock == 0) { +- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); ++ if (clock == 0) + return; +- } + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_enable_clk(host, clk); +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h +index a315cee698094f..16d7bff9eae562 100644 +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -825,4 +825,20 @@ void sdhci_switch_external_dma(struct sdhci_host *host, bool en); + void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); + void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); + ++#if defined(CONFIG_DYNAMIC_DEBUG) || \ ++ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) ++#define SDHCI_DBG_ANYWAY 0 ++#elif defined(DEBUG) ++#define SDHCI_DBG_ANYWAY 1 ++#else ++#define SDHCI_DBG_ANYWAY 0 ++#endif ++ ++#define sdhci_dbg_dumpregs(host, fmt) \ ++do { \ ++ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ ++ if (DYNAMIC_DEBUG_BRANCH(descriptor) || SDHCI_DBG_ANYWAY) \ ++ sdhci_dumpregs(host); \ ++} while (0) ++ + #endif /* __SDHCI_HW_H */ +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index 393ff37f0d23c1..cd21bf8f254a75 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -1316,6 +1316,7 @@ static void spinand_cleanup(struct spinand_device *spinand) + { + struct nand_device *nand = spinand_to_nand(spinand); + ++ nanddev_ecc_engine_cleanup(nand); + nanddev_cleanup(nand); + spinand_manufacturer_cleanup(spinand); + kfree(spinand->databuf); +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h +index 3b70f67376331e..aa25a8a0a106f6 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h +@@ -1373,6 +1373,8 @@ + #define MDIO_VEND2_CTRL1_SS13 BIT(13) + #endif + ++#define XGBE_VEND2_MAC_AUTO_SW BIT(9) ++ + /* MDIO mask values */ + #define XGBE_AN_CL73_INT_CMPLT BIT(0) + #define XGBE_AN_CL73_INC_LINK BIT(1) +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +index 4a2dc705b52801..8345d439184ebe 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +@@ -375,6 +375,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable, + reg |= MDIO_VEND2_CTRL1_AN_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); ++ ++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL); ++ reg |= XGBE_VEND2_MAC_AUTO_SW; ++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg); + } + + static void xgbe_an37_restart(struct xgbe_prv_data *pdata) +@@ -1003,6 +1007,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata) + + netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n", + (pdata->an_mode == XGBE_AN_MODE_CL37) ? 
"BaseX" : "SGMII"); ++ ++ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); ++ reg &= ~MDIO_AN_CTRL1_ENABLE; ++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); ++ + } + + static void xgbe_an73_init(struct xgbe_prv_data *pdata) +@@ -1404,6 +1413,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata) + + pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata, + &an_restart); ++ /* bail out if the link status register read fails */ ++ if (pdata->phy.link < 0) ++ return; ++ + if (an_restart) { + xgbe_phy_config_aneg(pdata); + goto adjust_link; +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +index 268399dfcf22f0..32e633d1134843 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +@@ -2855,8 +2855,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) + static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) + { + struct xgbe_phy_data *phy_data = pdata->phy_data; +- unsigned int reg; +- int ret; ++ int reg, ret; + + *an_restart = 0; + +@@ -2890,11 +2889,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) + return 0; + } + +- /* Link status is latched low, so read once to clear +- * and then read again to get current state +- */ +- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); ++ if (reg < 0) ++ return reg; ++ ++ /* Link status is latched low so that momentary link drops ++ * can be detected. If link was already down read again ++ * to get the latest state. ++ */ ++ ++ if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) { ++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); ++ if (reg < 0) ++ return reg; ++ } + + if (pdata->en_rx_adap) { + /* if the link is available and adaptation is done, +@@ -2913,9 +2921,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) + xgbe_phy_set_mode(pdata, phy_data->cur_mode); + } + +- /* check again for the link and adaptation status */ +- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); +- if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) ++ if (pdata->rx_adapt_done) + return 1; + } else if (reg & MDIO_STAT1_LSTATUS) + return 1; +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h +index 173f4dad470f55..a596cd08124fa4 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe.h ++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h +@@ -292,12 +292,12 @@ + #define XGBE_LINK_TIMEOUT 5 + #define XGBE_KR_TRAINING_WAIT_ITER 50 + +-#define XGBE_SGMII_AN_LINK_STATUS BIT(1) ++#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1) + #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) + #define XGBE_SGMII_AN_LINK_SPEED_10 0x00 + #define XGBE_SGMII_AN_LINK_SPEED_100 0x04 + #define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 +-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) ++#define XGBE_SGMII_AN_LINK_STATUS BIT(4) + + /* ECC correctable error notification window (seconds) */ + #define XGBE_ECC_LIMIT 60 +diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c +index 02aa6fd8ebc2d4..4ed165702d58eb 100644 +--- a/drivers/net/ethernet/atheros/atlx/atl1.c ++++ b/drivers/net/ethernet/atheros/atlx/atl1.c +@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) + break; + } + +- buffer_info->alloced = 1; +- buffer_info->skb = skb; +- buffer_info->length = (u16) adapter->rx_buffer_len; + page = virt_to_page(skb->data); + offset = 
offset_in_page(skb->data); + buffer_info->dma = dma_map_page(&pdev->dev, page, offset, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); ++ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { ++ kfree_skb(skb); ++ adapter->soft_stats.rx_dropped++; ++ break; ++ } ++ ++ buffer_info->alloced = 1; ++ buffer_info->skb = skb; ++ buffer_info->length = (u16)adapter->rx_buffer_len; ++ + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); + rfd_desc->coalese = 0; +@@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, + return 0; + } + +-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, +- struct tx_packet_desc *ptpd) ++static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, ++ struct tx_packet_desc *ptpd) + { + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; +@@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + unsigned int nr_frags; + unsigned int f; + int retval; ++ u16 first_mapped; + u16 next_to_use; + u16 data_len; + u8 hdr_len; +@@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + buf_len -= skb->data_len; + nr_frags = skb_shinfo(skb)->nr_frags; + next_to_use = atomic_read(&tpd_ring->next_to_use); ++ first_mapped = next_to_use; + buffer_info = &tpd_ring->buffer_info[next_to_use]; + BUG_ON(buffer_info->skb); + /* put skb in last TPD */ +@@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, + offset, hdr_len, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) ++ goto dma_err; + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; +@@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + page, offset, + buffer_info->length, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, ++ buffer_info->dma)) ++ goto dma_err; + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } +@@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, + offset, buf_len, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) ++ goto dma_err; + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } +@@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, i * ATL1_MAX_TX_BUF_LEN, + buffer_info->length, DMA_TO_DEVICE); ++ if (dma_mapping_error(&adapter->pdev->dev, ++ buffer_info->dma)) ++ goto dma_err; + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; +@@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + + /* last tpd's buffer-info */ + buffer_info->skb = skb; ++ ++ return true; ++ ++ dma_err: ++ while (first_mapped != next_to_use) { ++ buffer_info = &tpd_ring->buffer_info[first_mapped]; ++ dma_unmap_page(&adapter->pdev->dev, ++ buffer_info->dma, ++ buffer_info->length, ++ DMA_TO_DEVICE); ++ buffer_info->dma = 0; ++ ++ if (++first_mapped == tpd_ring->count) ++ first_mapped = 0; ++ } ++ return false; + } + + static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, +@@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, + + 
len = skb_headlen(skb); + +- if (unlikely(skb->len <= 0)) { +- dev_kfree_skb_any(skb); +- return NETDEV_TX_OK; +- } ++ if (unlikely(skb->len <= 0)) ++ goto drop_packet; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) { +@@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, + if (mss) { + if (skb->protocol == htons(ETH_P_IP)) { + proto_hdr_len = skb_tcp_all_headers(skb); +- if (unlikely(proto_hdr_len > len)) { +- dev_kfree_skb_any(skb); +- return NETDEV_TX_OK; +- } ++ if (unlikely(proto_hdr_len > len)) ++ goto drop_packet; ++ + /* need additional TPD ? */ + if (proto_hdr_len != len) + count += (len - proto_hdr_len + +@@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, + } + + tso = atl1_tso(adapter, skb, ptpd); +- if (tso < 0) { +- dev_kfree_skb_any(skb); +- return NETDEV_TX_OK; +- } ++ if (tso < 0) ++ goto drop_packet; + + if (!tso) { + ret_val = atl1_tx_csum(adapter, skb, ptpd); +- if (ret_val < 0) { +- dev_kfree_skb_any(skb); +- return NETDEV_TX_OK; +- } ++ if (ret_val < 0) ++ goto drop_packet; + } + +- atl1_tx_map(adapter, skb, ptpd); ++ if (!atl1_tx_map(adapter, skb, ptpd)) ++ goto drop_packet; ++ + atl1_tx_queue(adapter, count, ptpd); + atl1_update_mailbox(adapter); + return NETDEV_TX_OK; ++ ++drop_packet: ++ adapter->soft_stats.tx_errors++; ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; + } + + static int atl1_rings_clean(struct napi_struct *napi, int budget) +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 6bf4a21853858f..8e4e8291d8c66f 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -2491,6 +2491,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + { + struct bnxt_napi *bnapi = cpr->bnapi; + u32 raw_cons = cpr->cp_raw_cons; ++ bool flush_xdp = false; + u32 cons; + int tx_pkts = 0; + int rx_pkts = 0; +@@ -2528,6 +2529,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + else + rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, + &event); ++ if (event & BNXT_REDIRECT_EVENT) ++ flush_xdp = true; + if (likely(rc >= 0)) + rx_pkts += rc; + /* Increment rx_pkts when rc is -ENOMEM to count towards +@@ -2555,8 +2558,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + } + } + +- if (event & BNXT_REDIRECT_EVENT) ++ if (flush_xdp) { + xdp_do_flush(); ++ event &= ~BNXT_REDIRECT_EVENT; ++ } + + if (event & BNXT_TX_EVENT) { + struct bnxt_tx_ring_info *txr = bnapi->tx_ring; +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index cccf0db2fb4e58..48701032c20c56 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -2057,10 +2057,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) + return -EOPNOTSUPP; + +- if (netdev->mtu > enic->port_mtu) ++ if (new_mtu > enic->port_mtu) + netdev_warn(netdev, + "interface MTU (%d) set higher than port MTU (%d)\n", +- netdev->mtu, enic->port_mtu); ++ new_mtu, enic->port_mtu); + + return _enic_change_mtu(netdev, new_mtu); + } +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +index 40e88182959519..d3c36a6f84b01d 100644 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +@@ 
-3928,6 +3928,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) { + dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); ++ xdp_rxq_info_unreg(&fq->channel->xdp_rxq); + return err; + } + +@@ -4421,17 +4422,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) + return -EINVAL; + } + if (err) +- return err; ++ goto out; + } + + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, &priv->tx_qdid); + if (err) { + dev_err(dev, "dpni_get_qdid() failed\n"); +- return err; ++ goto out; + } + + return 0; ++ ++out: ++ while (i--) { ++ if (priv->fq[i].type == DPAA2_RX_FQ && ++ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq)) ++ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq); ++ } ++ return err; + } + + /* Allocate rings for storing incoming frame descriptors */ +@@ -4813,6 +4822,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) + } + } + ++static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ if (priv->fq[i].type == DPAA2_RX_FQ && ++ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq)) ++ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq); ++ } ++} ++ + static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) + { + struct device *dev; +@@ -5016,6 +5036,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) + free_percpu(priv->percpu_stats); + err_alloc_percpu_stats: + dpaa2_eth_del_ch_napi(priv); ++ dpaa2_eth_free_rx_xdp_rxq(priv); + err_bind: + dpaa2_eth_free_dpbps(priv); + err_dpbp_setup: +@@ -5068,6 +5089,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev) + free_percpu(priv->percpu_extras); + + dpaa2_eth_del_ch_napi(priv); ++ dpaa2_eth_free_rx_xdp_rxq(priv); + dpaa2_eth_free_dpbps(priv); + dpaa2_eth_free_dpio(priv); + dpaa2_eth_free_dpni(priv); +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c +index e2f5c4384455e0..11543db4c47f0e 100644 +--- a/drivers/net/ethernet/intel/igc/igc_main.c ++++ b/drivers/net/ethernet/intel/igc/igc_main.c +@@ -6772,6 +6772,10 @@ static int igc_probe(struct pci_dev *pdev, + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + ++ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ ++ if (igc_is_device_id_i226(hw)) ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); ++ + err = pci_save_state(pdev); + if (err) + goto err_ioremap; +@@ -7144,6 +7148,9 @@ static int __maybe_unused igc_resume(struct device *dev) + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + ++ if (igc_is_device_id_i226(hw)) ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); ++ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; +@@ -7259,6 +7266,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + ++ if (igc_is_device_id_i226(hw)) ++ pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2); ++ + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. 
+ */ +diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c +index 011d74087f860d..fc6217917fc22b 100644 +--- a/drivers/net/ethernet/sun/niu.c ++++ b/drivers/net/ethernet/sun/niu.c +@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, + + addr = np->ops->map_page(np->device, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); +- if (!addr) { ++ if (np->ops->mapping_error(np->device, addr)) { + __free_page(page); + return -ENOMEM; + } +@@ -6672,6 +6672,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, + len = skb_headlen(skb); + mapping = np->ops->map_single(np->device, skb->data, + len, DMA_TO_DEVICE); ++ if (np->ops->mapping_error(np->device, mapping)) ++ goto out_drop; + + prod = rp->prod; + +@@ -6713,6 +6715,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, + mapping = np->ops->map_page(np->device, skb_frag_page(frag), + skb_frag_off(frag), len, + DMA_TO_DEVICE); ++ if (np->ops->mapping_error(np->device, mapping)) ++ goto out_unmap; + + rp->tx_buffs[prod].skb = NULL; + rp->tx_buffs[prod].mapping = mapping; +@@ -6737,6 +6741,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, + out: + return NETDEV_TX_OK; + ++out_unmap: ++ while (i--) { ++ const skb_frag_t *frag; ++ ++ prod = PREVIOUS_TX(rp, prod); ++ frag = &skb_shinfo(skb)->frags[i]; ++ np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping, ++ skb_frag_size(frag), DMA_TO_DEVICE); ++ } ++ ++ np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping, ++ skb_headlen(skb), DMA_TO_DEVICE); ++ + out_drop: + rp->tx_errors++; + kfree_skb(skb); +@@ -9636,6 +9653,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address, + dma_unmap_single(dev, dma_address, size, direction); + } + ++static int niu_pci_mapping_error(struct device *dev, u64 addr) ++{ ++ return dma_mapping_error(dev, addr); ++} ++ + static const struct niu_ops niu_pci_ops = { + .alloc_coherent = niu_pci_alloc_coherent, + .free_coherent = niu_pci_free_coherent, +@@ -9643,6 +9665,7 @@ static const struct niu_ops niu_pci_ops = { + .unmap_page = niu_pci_unmap_page, + .map_single = niu_pci_map_single, + .unmap_single = niu_pci_unmap_single, ++ .mapping_error = niu_pci_mapping_error, + }; + + static void niu_driver_version(void) +@@ -10009,6 +10032,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address, + /* Nothing to do. */ + } + ++static int niu_phys_mapping_error(struct device *dev, u64 dma_address) ++{ ++ return false; ++} ++ + static const struct niu_ops niu_phys_ops = { + .alloc_coherent = niu_phys_alloc_coherent, + .free_coherent = niu_phys_free_coherent, +@@ -10016,6 +10044,7 @@ static const struct niu_ops niu_phys_ops = { + .unmap_page = niu_phys_unmap_page, + .map_single = niu_phys_map_single, + .unmap_single = niu_phys_unmap_single, ++ .mapping_error = niu_phys_mapping_error, + }; + + static int niu_of_probe(struct platform_device *op) +diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h +index 04c215f91fc08e..0b169c08b0f2d1 100644 +--- a/drivers/net/ethernet/sun/niu.h ++++ b/drivers/net/ethernet/sun/niu.h +@@ -2879,6 +2879,9 @@ struct tx_ring_info { + #define NEXT_TX(tp, index) \ + (((index) + 1) < (tp)->pending ? ((index) + 1) : 0) + ++#define PREVIOUS_TX(tp, index) \ ++ (((index) - 1) >= 0 ? 
((index) - 1) : (((tp)->pending) - 1)) ++ + static inline u32 niu_tx_avail(struct tx_ring_info *tp) + { + return (tp->pending - +@@ -3140,6 +3143,7 @@ struct niu_ops { + enum dma_data_direction direction); + void (*unmap_single)(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction); ++ int (*mapping_error)(struct device *dev, u64 dma_address); + }; + + struct niu_link_config { +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 09173d7b87ed5c..ec5689cd240aaf 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -4229,8 +4229,6 @@ static void lan78xx_disconnect(struct usb_interface *intf) + if (!dev) + return; + +- netif_napi_del(&dev->napi); +- + udev = interface_to_usbdev(intf); + net = dev->net; + +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 3bf394b24d9711..5a949f9446a8ed 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -487,6 +487,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) + return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); + } + ++static int check_mergeable_len(struct net_device *dev, void *mrg_ctx, ++ unsigned int len) ++{ ++ unsigned int headroom, tailroom, room, truesize; ++ ++ truesize = mergeable_ctx_to_truesize(mrg_ctx); ++ headroom = mergeable_ctx_to_headroom(mrg_ctx); ++ tailroom = headroom ? sizeof(struct skb_shared_info) : 0; ++ room = SKB_DATA_ALIGN(headroom + tailroom); ++ ++ if (len > truesize - room) { ++ pr_debug("%s: rx error: len %u exceeds truesize %lu\n", ++ dev->name, len, (unsigned long)(truesize - room)); ++ DEV_STATS_INC(dev, rx_length_errors); ++ return -1; ++ } ++ ++ return 0; ++} ++ + static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen, + unsigned int headroom, + unsigned int len) +@@ -1084,7 +1104,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi) + * across multiple buffers (num_buf > 1), and we make sure buffers + * have enough headroom. + */ +-static struct page *xdp_linearize_page(struct receive_queue *rq, ++static struct page *xdp_linearize_page(struct net_device *dev, ++ struct receive_queue *rq, + int *num_buf, + struct page *p, + int offset, +@@ -1104,18 +1125,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, + memcpy(page_address(page) + page_off, page_address(p) + offset, *len); + page_off += *len; + ++ /* Only mergeable mode can go inside this while loop. In small mode, ++ * *num_buf == 1, so it cannot go inside. ++ */ + while (--*num_buf) { + unsigned int buflen; + void *buf; ++ void *ctx; + int off; + +- buf = virtnet_rq_get_buf(rq, &buflen, NULL); ++ buf = virtnet_rq_get_buf(rq, &buflen, &ctx); + if (unlikely(!buf)) + goto err_buf; + + p = virt_to_head_page(buf); + off = buf - page_address(p); + ++ if (check_mergeable_len(dev, ctx, buflen)) { ++ put_page(p); ++ goto err_buf; ++ } ++ + /* guard against a misconfigured or uncooperative backend that + * is sending packet larger than the MTU. 
+ */ +@@ -1204,7 +1234,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev, + headroom = vi->hdr_len + header_offset; + buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +- xdp_page = xdp_linearize_page(rq, &num_buf, page, ++ xdp_page = xdp_linearize_page(dev, rq, &num_buf, page, + offset, header_offset, + &tlen); + if (!xdp_page) +@@ -1539,7 +1569,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, + */ + if (!xdp_prog->aux->xdp_has_frags) { + /* linearize data for XDP */ +- xdp_page = xdp_linearize_page(rq, num_buf, ++ xdp_page = xdp_linearize_page(vi->dev, rq, num_buf, + *page, offset, + VIRTIO_XDP_HEADROOM, + len); +diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c +index af98e871199d31..5a9e93fd1ef42a 100644 +--- a/drivers/net/wireless/ath/ath6kl/bmi.c ++++ b/drivers/net/wireless/ath/ath6kl/bmi.c +@@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, + * We need to do some backwards compatibility to make this work. + */ + if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) { +- WARN_ON(1); ++ ath6kl_err("mismatched byte count %d vs. expected %zd\n", ++ le32_to_cpu(targ_info->byte_count), ++ sizeof(*targ_info)); + return -EINVAL; + } + +diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c +index 39828eb84e0ba0..1015948ef43eb8 100644 +--- a/drivers/platform/mellanox/mlxbf-tmfifo.c ++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c +@@ -281,7 +281,8 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo, + vring->align = SMP_CACHE_BYTES; + vring->index = i; + vring->vdev_id = tm_vdev->vdev.id.device; +- vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN; ++ vring->drop_desc.len = cpu_to_virtio32(&tm_vdev->vdev, ++ VRING_DROP_DESC_MAX_LEN); + dev = &tm_vdev->vdev.dev; + + size = vring_size(vring->num, vring->align); +diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c +index 8d833836a6d322..74e9d78ff01efe 100644 +--- a/drivers/platform/mellanox/mlxreg-lc.c ++++ b/drivers/platform/mellanox/mlxreg-lc.c +@@ -688,7 +688,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent, + if (regval & mlxreg_lc->data->mask) { + mlxreg_lc->state |= MLXREG_LC_SYNCED; + mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1); +- if (mlxreg_lc->state & ~MLXREG_LC_POWERED) { ++ if (!(mlxreg_lc->state & MLXREG_LC_POWERED)) { + err = mlxreg_lc_power_on_off(mlxreg_lc, 1); + if (err) + goto mlxreg_lc_regmap_power_on_off_fail; +diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c +index 1a7c45aa41bbf0..6b4d3c44d7bd96 100644 +--- a/drivers/platform/mellanox/nvsw-sn2201.c ++++ b/drivers/platform/mellanox/nvsw-sn2201.c +@@ -1088,7 +1088,7 @@ static int nvsw_sn2201_i2c_completion_notify(void *handle, int id) + if (!nvsw_sn2201->main_mux_devs->adapter) { + err = -ENODEV; + dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n", +- nvsw_sn2201->cpld_devs->nr); ++ nvsw_sn2201->main_mux_devs->nr); + goto i2c_get_adapter_main_fail; + } + +diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c +index 2e3f6fc67c568d..7ed12c1d3b34c0 100644 +--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c ++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c +@@ -224,6 +224,15 @@ static const struct dmi_system_id fwbug_list[] = { + DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), + } + }, ++ 
/* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */ ++ { ++ .ident = "PCSpecialist Lafite Pro V 14M", ++ .driver_data = &quirk_spurious_8042, ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"), ++ } ++ }, + {} + }; + +diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h +index 3ad33a094588c6..817ee7ba07ca08 100644 +--- a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h ++++ b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h +@@ -89,6 +89,11 @@ extern struct wmi_sysman_priv wmi_priv; + + enum { ENUM, INT, STR, PO }; + ++#define ENUM_MIN_ELEMENTS 8 ++#define INT_MIN_ELEMENTS 9 ++#define STR_MIN_ELEMENTS 8 ++#define PO_MIN_ELEMENTS 4 ++ + enum { + ATTR_NAME, + DISPL_NAME_LANG_CODE, +diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c +index 8cc212c8526683..fc2f58b4cbc6ef 100644 +--- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c ++++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c +@@ -23,9 +23,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a + obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); + if (!obj) + return -EIO; +- if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { ++ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < ENUM_MIN_ELEMENTS || ++ obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { + kfree(obj); +- return -EINVAL; ++ return -EIO; + } + ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer); + kfree(obj); +diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c +index 951e75b538fad4..73524806423914 100644 +--- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c ++++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c +@@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a + obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); + if (!obj) + return -EIO; +- if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) { ++ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < INT_MIN_ELEMENTS || ++ obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) { + kfree(obj); +- return -EINVAL; ++ return -EIO; + } + ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[CURRENT_VAL].integer.value); + kfree(obj); +diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +index d8f1bf5e58a0f4..3167e06d416ede 100644 +--- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c ++++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +@@ -26,9 +26,10 @@ static ssize_t is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr + obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); + if (!obj) + return -EIO; +- if (obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) { ++ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < PO_MIN_ELEMENTS || ++ obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) { + kfree(obj); +- return -EINVAL; ++ return -EIO; + } + ret = snprintf(buf, PAGE_SIZE, "%lld\n", 
obj->package.elements[IS_PASS_SET].integer.value); + kfree(obj); +diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c +index c392f0ecf8b55b..0d2c74f8d1aad7 100644 +--- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c ++++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c +@@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a + obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); + if (!obj) + return -EIO; +- if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { ++ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < STR_MIN_ELEMENTS || ++ obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { + kfree(obj); +- return -EINVAL; ++ return -EIO; + } + ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer); + kfree(obj); +diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +index af49dd6b31ade7..f5402b71465729 100644 +--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c ++++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +@@ -25,7 +25,6 @@ struct wmi_sysman_priv wmi_priv = { + /* reset bios to defaults */ + static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"}; + static int reset_option = -1; +-static struct class *fw_attr_class; + + + /** +@@ -408,10 +407,10 @@ static int init_bios_attributes(int attr_type, const char *guid) + return retval; + + switch (attr_type) { +- case ENUM: min_elements = 8; break; +- case INT: min_elements = 9; break; +- case STR: min_elements = 8; break; +- case PO: min_elements = 4; break; ++ case ENUM: min_elements = ENUM_MIN_ELEMENTS; break; ++ case INT: min_elements = INT_MIN_ELEMENTS; break; ++ case STR: min_elements = STR_MIN_ELEMENTS; break; ++ case PO: min_elements = PO_MIN_ELEMENTS; break; + default: + pr_err("Error: Unknown attr_type: %d\n", attr_type); + return -EINVAL; +@@ -541,15 +540,11 @@ static int __init sysman_init(void) + goto err_exit_bios_attr_pass_interface; + } + +- ret = fw_attributes_class_get(&fw_attr_class); +- if (ret) +- goto err_exit_bios_attr_pass_interface; +- +- wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), ++ wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), + NULL, "%s", DRIVER_NAME); + if (IS_ERR(wmi_priv.class_dev)) { + ret = PTR_ERR(wmi_priv.class_dev); +- goto err_unregister_class; ++ goto err_exit_bios_attr_pass_interface; + } + + wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL, +@@ -602,10 +597,7 @@ static int __init sysman_init(void) + release_attributes_data(); + + err_destroy_classdev: +- device_destroy(fw_attr_class, MKDEV(0, 0)); +- +-err_unregister_class: +- fw_attributes_class_put(); ++ device_unregister(wmi_priv.class_dev); + + err_exit_bios_attr_pass_interface: + exit_bios_attr_pass_interface(); +@@ -619,8 +611,7 @@ static int __init sysman_init(void) + static void __exit sysman_exit(void) + { + release_attributes_data(); +- device_destroy(fw_attr_class, MKDEV(0, 0)); +- fw_attributes_class_put(); ++ device_unregister(wmi_priv.class_dev); + exit_bios_attr_set_interface(); + exit_bios_attr_pass_interface(); + } +diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c +index fafe8eaf6e3e4e..e214efc97311e2 100644 +--- 
a/drivers/platform/x86/firmware_attributes_class.c ++++ b/drivers/platform/x86/firmware_attributes_class.c +@@ -2,48 +2,35 @@ + + /* Firmware attributes class helper module */ + +-#include +-#include + #include + #include "firmware_attributes_class.h" + +-static DEFINE_MUTEX(fw_attr_lock); +-static int fw_attr_inuse; +- +-static struct class firmware_attributes_class = { ++const struct class firmware_attributes_class = { + .name = "firmware-attributes", + }; ++EXPORT_SYMBOL_GPL(firmware_attributes_class); ++ ++static __init int fw_attributes_class_init(void) ++{ ++ return class_register(&firmware_attributes_class); ++} ++module_init(fw_attributes_class_init); ++ ++static __exit void fw_attributes_class_exit(void) ++{ ++ class_unregister(&firmware_attributes_class); ++} ++module_exit(fw_attributes_class_exit); + +-int fw_attributes_class_get(struct class **fw_attr_class) ++int fw_attributes_class_get(const struct class **fw_attr_class) + { +- int err; +- +- mutex_lock(&fw_attr_lock); +- if (!fw_attr_inuse) { /*first time class is being used*/ +- err = class_register(&firmware_attributes_class); +- if (err) { +- mutex_unlock(&fw_attr_lock); +- return err; +- } +- } +- fw_attr_inuse++; + *fw_attr_class = &firmware_attributes_class; +- mutex_unlock(&fw_attr_lock); + return 0; + } + EXPORT_SYMBOL_GPL(fw_attributes_class_get); + + int fw_attributes_class_put(void) + { +- mutex_lock(&fw_attr_lock); +- if (!fw_attr_inuse) { +- mutex_unlock(&fw_attr_lock); +- return -EINVAL; +- } +- fw_attr_inuse--; +- if (!fw_attr_inuse) /* No more consumers */ +- class_unregister(&firmware_attributes_class); +- mutex_unlock(&fw_attr_lock); + return 0; + } + EXPORT_SYMBOL_GPL(fw_attributes_class_put); +diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h +index 486485cb1f54e3..ef6c3764a83497 100644 +--- a/drivers/platform/x86/firmware_attributes_class.h ++++ b/drivers/platform/x86/firmware_attributes_class.h +@@ -5,7 +5,10 @@ + #ifndef FW_ATTR_CLASS_H + #define FW_ATTR_CLASS_H + +-int fw_attributes_class_get(struct class **fw_attr_class); ++#include ++ ++extern const struct class firmware_attributes_class; ++int fw_attributes_class_get(const struct class **fw_attr_class); + int fw_attributes_class_put(void); + + #endif /* FW_ATTR_CLASS_H */ +diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +index 6ddca857cc4d1a..b62b158cffd85a 100644 +--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c ++++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +@@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = { + .mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex), + }; + +-static struct class *fw_attr_class; +- + ssize_t display_name_language_code_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +@@ -974,11 +972,7 @@ static int __init hp_init(void) + if (ret) + return ret; + +- ret = fw_attributes_class_get(&fw_attr_class); +- if (ret) +- goto err_unregister_class; +- +- bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), ++ bioscfg_drv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), + NULL, "%s", DRIVER_NAME); + if (IS_ERR(bioscfg_drv.class_dev)) { + ret = PTR_ERR(bioscfg_drv.class_dev); +@@ -1045,10 +1039,9 @@ static int __init hp_init(void) + release_attributes_data(); + + err_destroy_classdev: +- device_destroy(fw_attr_class, MKDEV(0, 0)); ++ device_unregister(bioscfg_drv.class_dev); + + err_unregister_class: +- fw_attributes_class_put(); + 
hp_exit_attr_set_interface(); + + return ret; +@@ -1057,9 +1050,8 @@ static int __init hp_init(void) + static void __exit hp_exit(void) + { + release_attributes_data(); +- device_destroy(fw_attr_class, MKDEV(0, 0)); ++ device_unregister(bioscfg_drv.class_dev); + +- fw_attributes_class_put(); + hp_exit_attr_set_interface(); + } + +diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c +index 2396decdb3cb3f..d5319b4637e18d 100644 +--- a/drivers/platform/x86/think-lmi.c ++++ b/drivers/platform/x86/think-lmi.c +@@ -195,7 +195,6 @@ static const char * const level_options[] = { + [TLMI_LEVEL_MASTER] = "master", + }; + static struct think_lmi tlmi_priv; +-static struct class *fw_attr_class; + static DEFINE_MUTEX(tlmi_mutex); + + /* ------ Utility functions ------------*/ +@@ -917,6 +916,7 @@ static const struct attribute_group auth_attr_group = { + .is_visible = auth_attr_is_visible, + .attrs = auth_attrs, + }; ++__ATTRIBUTE_GROUPS(auth_attr); + + /* ---- Attributes sysfs --------------------------------------------------------- */ + static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr, +@@ -1120,6 +1120,7 @@ static const struct attribute_group tlmi_attr_group = { + .is_visible = attr_is_visible, + .attrs = tlmi_attrs, + }; ++__ATTRIBUTE_GROUPS(tlmi_attr); + + static void tlmi_attr_setting_release(struct kobject *kobj) + { +@@ -1139,11 +1140,13 @@ static void tlmi_pwd_setting_release(struct kobject *kobj) + static const struct kobj_type tlmi_attr_setting_ktype = { + .release = &tlmi_attr_setting_release, + .sysfs_ops = &kobj_sysfs_ops, ++ .default_groups = tlmi_attr_groups, + }; + + static const struct kobj_type tlmi_pwd_setting_ktype = { + .release = &tlmi_pwd_setting_release, + .sysfs_ops = &kobj_sysfs_ops, ++ .default_groups = auth_attr_groups, + }; + + static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, +@@ -1213,19 +1216,16 @@ static struct kobj_attribute debug_cmd = __ATTR_WO(debug_cmd); + /* ---- Initialisation --------------------------------------------------------- */ + static void tlmi_release_attr(void) + { +- int i; ++ struct kobject *pos, *n; + + /* Attribute structures */ +- for (i = 0; i < TLMI_SETTINGS_COUNT; i++) { +- if (tlmi_priv.setting[i]) { +- sysfs_remove_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group); +- kobject_put(&tlmi_priv.setting[i]->kobj); +- } +- } + sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr); + if (tlmi_priv.can_debug_cmd && debug_support) + sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr); + ++ list_for_each_entry_safe(pos, n, &tlmi_priv.attribute_kset->list, entry) ++ kobject_put(pos); ++ + kset_unregister(tlmi_priv.attribute_kset); + + /* Free up any saved signatures */ +@@ -1233,19 +1233,8 @@ static void tlmi_release_attr(void) + kfree(tlmi_priv.pwd_admin->save_signature); + + /* Authentication structures */ +- sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); +- kobject_put(&tlmi_priv.pwd_admin->kobj); +- sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group); +- kobject_put(&tlmi_priv.pwd_power->kobj); +- +- if (tlmi_priv.opcode_support) { +- sysfs_remove_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group); +- kobject_put(&tlmi_priv.pwd_system->kobj); +- sysfs_remove_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group); +- kobject_put(&tlmi_priv.pwd_hdd->kobj); +- sysfs_remove_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group); +- kobject_put(&tlmi_priv.pwd_nvme->kobj); +- } ++ 
list_for_each_entry_safe(pos, n, &tlmi_priv.authentication_kset->list, entry) ++ kobject_put(pos); + + kset_unregister(tlmi_priv.authentication_kset); + } +@@ -1272,11 +1261,7 @@ static int tlmi_sysfs_init(void) + { + int i, ret; + +- ret = fw_attributes_class_get(&fw_attr_class); +- if (ret) +- return ret; +- +- tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), ++ tlmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), + NULL, "%s", "thinklmi"); + if (IS_ERR(tlmi_priv.class_dev)) { + ret = PTR_ERR(tlmi_priv.class_dev); +@@ -1290,6 +1275,14 @@ static int tlmi_sysfs_init(void) + goto fail_device_created; + } + ++ tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL, ++ &tlmi_priv.class_dev->kobj); ++ if (!tlmi_priv.authentication_kset) { ++ kset_unregister(tlmi_priv.attribute_kset); ++ ret = -ENOMEM; ++ goto fail_device_created; ++ } ++ + for (i = 0; i < TLMI_SETTINGS_COUNT; i++) { + /* Check if index is a valid setting - skip if it isn't */ + if (!tlmi_priv.setting[i]) +@@ -1306,12 +1299,8 @@ static int tlmi_sysfs_init(void) + + /* Build attribute */ + tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset; +- ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL, +- "%s", tlmi_priv.setting[i]->display_name); +- if (ret) +- goto fail_create_attr; +- +- ret = sysfs_create_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group); ++ ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype, ++ NULL, "%s", tlmi_priv.setting[i]->display_name); + if (ret) + goto fail_create_attr; + } +@@ -1327,55 +1316,34 @@ static int tlmi_sysfs_init(void) + } + + /* Create authentication entries */ +- tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL, +- &tlmi_priv.class_dev->kobj); +- if (!tlmi_priv.authentication_kset) { +- ret = -ENOMEM; +- goto fail_create_attr; +- } + tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset; +- ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin"); +- if (ret) +- goto fail_create_attr; +- +- ret = sysfs_create_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); ++ ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype, ++ NULL, "%s", "Admin"); + if (ret) + goto fail_create_attr; + + tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset; +- ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "Power-on"); +- if (ret) +- goto fail_create_attr; +- +- ret = sysfs_create_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group); ++ ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype, ++ NULL, "%s", "Power-on"); + if (ret) + goto fail_create_attr; + + if (tlmi_priv.opcode_support) { + tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset; +- ret = kobject_add(&tlmi_priv.pwd_system->kobj, NULL, "%s", "System"); +- if (ret) +- goto fail_create_attr; +- +- ret = sysfs_create_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group); ++ ret = kobject_init_and_add(&tlmi_priv.pwd_system->kobj, &tlmi_pwd_setting_ktype, ++ NULL, "%s", "System"); + if (ret) + goto fail_create_attr; + + tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset; +- ret = kobject_add(&tlmi_priv.pwd_hdd->kobj, NULL, "%s", "HDD"); +- if (ret) +- goto fail_create_attr; +- +- ret = sysfs_create_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group); ++ ret = kobject_init_and_add(&tlmi_priv.pwd_hdd->kobj, &tlmi_pwd_setting_ktype, ++ NULL, "%s", "HDD"); + if (ret) + goto fail_create_attr; + + 
tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset; +- ret = kobject_add(&tlmi_priv.pwd_nvme->kobj, NULL, "%s", "NVMe"); +- if (ret) +- goto fail_create_attr; +- +- ret = sysfs_create_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group); ++ ret = kobject_init_and_add(&tlmi_priv.pwd_nvme->kobj, &tlmi_pwd_setting_ktype, ++ NULL, "%s", "NVMe"); + if (ret) + goto fail_create_attr; + } +@@ -1385,9 +1353,8 @@ static int tlmi_sysfs_init(void) + fail_create_attr: + tlmi_release_attr(); + fail_device_created: +- device_destroy(fw_attr_class, MKDEV(0, 0)); ++ device_unregister(tlmi_priv.class_dev); + fail_class_created: +- fw_attributes_class_put(); + return ret; + } + +@@ -1409,8 +1376,6 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type, + new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length; + new_pwd->index = 0; + +- kobject_init(&new_pwd->kobj, &tlmi_pwd_setting_ktype); +- + return new_pwd; + } + +@@ -1514,7 +1479,6 @@ static int tlmi_analyze(void) + if (setting->possible_values) + strreplace(setting->possible_values, ',', ';'); + +- kobject_init(&setting->kobj, &tlmi_attr_setting_ktype); + tlmi_priv.setting[i] = setting; + kfree(item); + } +@@ -1610,8 +1574,7 @@ static int tlmi_analyze(void) + static void tlmi_remove(struct wmi_device *wdev) + { + tlmi_release_attr(); +- device_destroy(fw_attr_class, MKDEV(0, 0)); +- fw_attributes_class_put(); ++ device_unregister(tlmi_priv.class_dev); + } + + static int tlmi_probe(struct wmi_device *wdev, const void *context) +diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c +index f1de4111e98d9d..5a09a56698f4a0 100644 +--- a/drivers/powercap/intel_rapl_common.c ++++ b/drivers/powercap/intel_rapl_common.c +@@ -338,12 +338,28 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode) + { + struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); + struct rapl_defaults *defaults = get_defaults(rd->rp); ++ u64 val; + int ret; + + cpus_read_lock(); + ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode); +- if (!ret && defaults->set_floor_freq) ++ if (ret) ++ goto end; ++ ++ ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, false, &val); ++ if (ret) ++ goto end; ++ ++ if (mode != val) { ++ pr_debug("%s cannot be %s\n", power_zone->name, ++ str_enabled_disabled(mode)); ++ goto end; ++ } ++ ++ if (defaults->set_floor_freq) + defaults->set_floor_freq(rd, mode); ++ ++end: + cpus_read_unlock(); + + return ret; +diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c +index 48f312167e5351..8912f5be72707c 100644 +--- a/drivers/regulator/fan53555.c ++++ b/drivers/regulator/fan53555.c +@@ -147,6 +147,7 @@ struct fan53555_device_info { + unsigned int slew_mask; + const unsigned int *ramp_delay_table; + unsigned int n_ramp_values; ++ unsigned int enable_time; + unsigned int slew_rate; + }; + +@@ -282,6 +283,7 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di) + di->slew_mask = CTL_SLEW_MASK; + di->ramp_delay_table = slew_rates; + di->n_ramp_values = ARRAY_SIZE(slew_rates); ++ di->enable_time = 250; + di->vsel_count = FAN53526_NVOLTAGES; + + return 0; +@@ -296,10 +298,12 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) + case FAN53555_CHIP_REV_00: + di->vsel_min = 600000; + di->vsel_step = 10000; ++ di->enable_time = 400; + break; + case FAN53555_CHIP_REV_13: + di->vsel_min = 800000; + di->vsel_step = 10000; ++ di->enable_time = 400; + break; + default: + dev_err(di->dev, +@@ -311,13 +315,19 @@ 
static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) + case FAN53555_CHIP_ID_01: + case FAN53555_CHIP_ID_03: + case FAN53555_CHIP_ID_05: ++ di->vsel_min = 600000; ++ di->vsel_step = 10000; ++ di->enable_time = 400; ++ break; + case FAN53555_CHIP_ID_08: + di->vsel_min = 600000; + di->vsel_step = 10000; ++ di->enable_time = 175; + break; + case FAN53555_CHIP_ID_04: + di->vsel_min = 603000; + di->vsel_step = 12826; ++ di->enable_time = 400; + break; + default: + dev_err(di->dev, +@@ -350,6 +360,7 @@ static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di) + di->slew_mask = CTL_SLEW_MASK; + di->ramp_delay_table = slew_rates; + di->n_ramp_values = ARRAY_SIZE(slew_rates); ++ di->enable_time = 360; + di->vsel_count = FAN53555_NVOLTAGES; + + return 0; +@@ -372,6 +383,7 @@ static int rk8602_voltages_setup_rockchip(struct fan53555_device_info *di) + di->slew_mask = CTL_SLEW_MASK; + di->ramp_delay_table = slew_rates; + di->n_ramp_values = ARRAY_SIZE(slew_rates); ++ di->enable_time = 360; + di->vsel_count = RK8602_NVOLTAGES; + + return 0; +@@ -395,6 +407,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di) + di->slew_mask = CTL_SLEW_MASK; + di->ramp_delay_table = slew_rates; + di->n_ramp_values = ARRAY_SIZE(slew_rates); ++ di->enable_time = 400; + di->vsel_count = FAN53555_NVOLTAGES; + + return 0; +@@ -594,6 +607,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, + rdesc->ramp_mask = di->slew_mask; + rdesc->ramp_delay_table = di->ramp_delay_table; + rdesc->n_ramp_values = di->n_ramp_values; ++ rdesc->enable_time = di->enable_time; + rdesc->owner = THIS_MODULE; + + rdev = devm_regulator_register(di->dev, &di->desc, config); +diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c +index 65927fa2ef161c..1bdd494cf8821e 100644 +--- a/drivers/regulator/gpio-regulator.c ++++ b/drivers/regulator/gpio-regulator.c +@@ -260,8 +260,10 @@ static int gpio_regulator_probe(struct platform_device *pdev) + return -ENOMEM; + } + +- drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *), +- GFP_KERNEL); ++ drvdata->gpiods = devm_kcalloc(dev, config->ngpios, ++ sizeof(struct gpio_desc *), GFP_KERNEL); ++ if (!drvdata->gpiods) ++ return -ENOMEM; + + if (config->input_supply) { + drvdata->desc.supply_name = devm_kstrdup(&pdev->dev, +@@ -274,8 +276,6 @@ static int gpio_regulator_probe(struct platform_device *pdev) + } + } + +- if (!drvdata->gpiods) +- return -ENOMEM; + for (i = 0; i < config->ngpios; i++) { + drvdata->gpiods[i] = devm_gpiod_get_index(dev, + NULL, +diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c +index 5849d2970bba45..095de4e0e4f388 100644 +--- a/drivers/rtc/rtc-cmos.c ++++ b/drivers/rtc/rtc-cmos.c +@@ -697,8 +697,12 @@ static irqreturn_t cmos_interrupt(int irq, void *p) + { + u8 irqstat; + u8 rtc_control; ++ unsigned long flags; + +- spin_lock(&rtc_lock); ++ /* We cannot use spin_lock() here, as cmos_interrupt() is also called ++ * in a non-irq context. ++ */ ++ spin_lock_irqsave(&rtc_lock, flags); + + /* When the HPET interrupt handler calls us, the interrupt + * status is passed as arg1 instead of the irq number. 
But +@@ -732,7 +736,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p) + hpet_mask_rtc_irq_bit(RTC_AIE); + CMOS_READ(RTC_INTR_FLAGS); + } +- spin_unlock(&rtc_lock); ++ spin_unlock_irqrestore(&rtc_lock, flags); + + if (is_intr(irqstat)) { + rtc_update_irq(p, 1, irqstat); +@@ -1300,9 +1304,7 @@ static void cmos_check_wkalrm(struct device *dev) + * ACK the rtc irq here + */ + if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { +- local_irq_disable(); + cmos_interrupt(0, (void *)cmos->rtc); +- local_irq_enable(); + return; + } + +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c +index 9c04c4e1a49c37..fc079b9dcf7192 100644 +--- a/drivers/rtc/rtc-pcf2127.c ++++ b/drivers/rtc/rtc-pcf2127.c +@@ -1383,6 +1383,11 @@ static int pcf2127_i2c_probe(struct i2c_client *client) + variant = &pcf21xx_cfg[type]; + } + ++ if (variant->type == PCF2131) { ++ config.read_flag_mask = 0x0; ++ config.write_flag_mask = 0x0; ++ } ++ + config.max_register = variant->max_register, + + regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap, +@@ -1456,7 +1461,7 @@ static int pcf2127_spi_probe(struct spi_device *spi) + variant = &pcf21xx_cfg[type]; + } + +- config.max_register = variant->max_register, ++ config.max_register = variant->max_register; + + regmap = devm_regmap_init_spi(spi, &config); + if (IS_ERR(regmap)) { +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c +index 0cd6f3e1488249..13b6cb1b93acd9 100644 +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -2147,7 +2147,7 @@ qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, + + pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, + sizeof(*pdb), DMA_FROM_DEVICE); +- if (!pdb_dma) { ++ if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) { + ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 675332e49a7b06..77c28d2ebf0137 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -3420,6 +3420,8 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) + task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, + task->data_count, + DMA_TO_DEVICE); ++ if (dma_mapping_error(&ha->pdev->dev, task_data->data_dma)) ++ return -ENOMEM; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index 7dd94369abb47c..3206c84c6f22fb 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -988,11 +988,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { + status = dspi_dma_xfer(dspi); + } else { ++ /* ++ * Reinitialize the completion before transferring data ++ * to avoid the case where it might remain in the done ++ * state due to a spurious interrupt from a previous ++ * transfer. This could falsely signal that the current ++ * transfer has completed. 
++ */ ++ if (dspi->irq) ++ reinit_completion(&dspi->xfer_done); ++ + dspi_fifo_write(dspi); + + if (dspi->irq) { + wait_for_completion(&dspi->xfer_done); +- reinit_completion(&dspi->xfer_done); + } else { + do { + status = dspi_poll(dspi); +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index 49d9167bb263b5..a9eb6a3e838347 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -1841,7 +1841,9 @@ core_scsi3_decode_spec_i_port( + } + + kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); +- core_scsi3_lunacl_undepend_item(dest_se_deve); ++ ++ if (dest_se_deve) ++ core_scsi3_lunacl_undepend_item(dest_se_deve); + + if (is_local) + continue; +diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c +index 3692b39b35e789..6c48255dfff02b 100644 +--- a/drivers/ufs/core/ufs-sysfs.c ++++ b/drivers/ufs/core/ufs-sysfs.c +@@ -1278,7 +1278,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); + UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); + UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); + UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); +-UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); ++UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8); + UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); + UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); + UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); +@@ -1295,7 +1295,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { + &dev_attr_logical_block_count.attr, + &dev_attr_erase_block_size.attr, + &dev_attr_provisioning_type.attr, +- &dev_attr_physical_memory_resourse_count.attr, ++ &dev_attr_physical_memory_resource_count.attr, + &dev_attr_context_capabilities.attr, + &dev_attr_large_unit_granularity.attr, + &dev_attr_wb_buf_alloc_units.attr, +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index 412931cf240f64..da20bd3d46bc78 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -289,8 +290,8 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); + static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); + static void ufshcd_resume_clkscaling(struct ufs_hba *hba); + static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); +-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); +-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); ++static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, ++ bool scale_up); + static irqreturn_t ufshcd_intr(int irq, void *__hba); + static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode); +@@ -1079,14 +1080,32 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) + return ret; + } + ++static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq) ++{ ++ struct dev_pm_opp *opp; ++ int ret; ++ ++ opp = dev_pm_opp_find_freq_floor_indexed(hba->dev, ++ &freq, 0); ++ if (IS_ERR(opp)) ++ return PTR_ERR(opp); ++ ++ ret = dev_pm_opp_set_opp(hba->dev, opp); ++ dev_pm_opp_put(opp); ++ ++ return ret; ++} ++ + /** + * ufshcd_scale_clks - scale up or scale down UFS controller clocks + * @hba: per adapter instance ++ * @freq: frequency to scale + * @scale_up: True if scaling up and false if scaling down + * + * Return: 0 if successful; < 0 upon 
failure. + */ +-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) ++static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, ++ bool scale_up) + { + int ret = 0; + ktime_t start = ktime_get(); +@@ -1095,13 +1114,21 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) + if (ret) + goto out; + +- ret = ufshcd_set_clk_freq(hba, scale_up); ++ if (hba->use_pm_opp) ++ ret = ufshcd_opp_set_rate(hba, freq); ++ else ++ ret = ufshcd_set_clk_freq(hba, scale_up); + if (ret) + goto out; + + ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); +- if (ret) +- ufshcd_set_clk_freq(hba, !scale_up); ++ if (ret) { ++ if (hba->use_pm_opp) ++ ufshcd_opp_set_rate(hba, ++ hba->devfreq->previous_freq); ++ else ++ ufshcd_set_clk_freq(hba, !scale_up); ++ } + + out: + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), +@@ -1113,12 +1140,13 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) + /** + * ufshcd_is_devfreq_scaling_required - check if scaling is required or not + * @hba: per adapter instance ++ * @freq: frequency to scale + * @scale_up: True if scaling up and false if scaling down + * + * Return: true if scaling is required, false otherwise. + */ + static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, +- bool scale_up) ++ unsigned long freq, bool scale_up) + { + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; +@@ -1126,6 +1154,9 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, + if (list_empty(head)) + return false; + ++ if (hba->use_pm_opp) ++ return freq != hba->clk_scaling.target_freq; ++ + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + if (scale_up && clki->max_freq) { +@@ -1324,12 +1355,14 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc + /** + * ufshcd_devfreq_scale - scale up/down UFS clocks and gear + * @hba: per adapter instance ++ * @freq: frequency to scale + * @scale_up: True for scaling up and false for scalin down + * + * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero + * for any other errors. 
+ */ +-static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) ++static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq, ++ bool scale_up) + { + int ret = 0; + +@@ -1344,7 +1377,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) + goto out_unprepare; + } + +- ret = ufshcd_scale_clks(hba, scale_up); ++ ret = ufshcd_scale_clks(hba, freq, scale_up); + if (ret) { + if (!scale_up) + ufshcd_scale_gear(hba, true); +@@ -1355,7 +1388,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) + if (scale_up) { + ret = ufshcd_scale_gear(hba, true); + if (ret) { +- ufshcd_scale_clks(hba, false); ++ ufshcd_scale_clks(hba, hba->devfreq->previous_freq, ++ false); + goto out_unprepare; + } + } +@@ -1377,9 +1411,10 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work) + return; + } + hba->clk_scaling.is_suspended = true; ++ hba->clk_scaling.window_start_t = 0; + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + +- __ufshcd_suspend_clkscaling(hba); ++ devfreq_suspend_device(hba->devfreq); + } + + static void ufshcd_clk_scaling_resume_work(struct work_struct *work) +@@ -1413,9 +1448,22 @@ static int ufshcd_devfreq_target(struct device *dev, + if (!ufshcd_is_clkscaling_supported(hba)) + return -EINVAL; + +- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); +- /* Override with the closest supported frequency */ +- *freq = (unsigned long) clk_round_rate(clki->clk, *freq); ++ if (hba->use_pm_opp) { ++ struct dev_pm_opp *opp; ++ ++ /* Get the recommended frequency from OPP framework */ ++ opp = devfreq_recommended_opp(dev, freq, flags); ++ if (IS_ERR(opp)) ++ return PTR_ERR(opp); ++ ++ dev_pm_opp_put(opp); ++ } else { ++ /* Override with the closest supported frequency */ ++ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, ++ list); ++ *freq = (unsigned long) clk_round_rate(clki->clk, *freq); ++ } ++ + spin_lock_irqsave(hba->host->host_lock, irq_flags); + if (ufshcd_eh_in_progress(hba)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); +@@ -1430,12 +1478,17 @@ static int ufshcd_devfreq_target(struct device *dev, + goto out; + } + +- /* Decide based on the rounded-off frequency and update */ +- scale_up = *freq == clki->max_freq; +- if (!scale_up) ++ /* Decide based on the target or rounded-off frequency and update */ ++ if (hba->use_pm_opp) ++ scale_up = *freq > hba->clk_scaling.target_freq; ++ else ++ scale_up = *freq == clki->max_freq; ++ ++ if (!hba->use_pm_opp && !scale_up) + *freq = clki->min_freq; ++ + /* Update the frequency */ +- if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { ++ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) { + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + ret = 0; + goto out; /* no state change required */ +@@ -1443,7 +1496,9 @@ static int ufshcd_devfreq_target(struct device *dev, + spin_unlock_irqrestore(hba->host->host_lock, irq_flags); + + start = ktime_get(); +- ret = ufshcd_devfreq_scale(hba, scale_up); ++ ret = ufshcd_devfreq_scale(hba, *freq, scale_up); ++ if (!ret) ++ hba->clk_scaling.target_freq = *freq; + + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), + (scale_up ? 
"up" : "down"), +@@ -1463,8 +1518,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; +- struct list_head *clk_list = &hba->clk_list_head; +- struct ufs_clk_info *clki; + ktime_t curr_t; + + if (!ufshcd_is_clkscaling_supported(hba)) +@@ -1477,17 +1530,24 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, + if (!scaling->window_start_t) + goto start_window; + +- clki = list_first_entry(clk_list, struct ufs_clk_info, list); + /* + * If current frequency is 0, then the ondemand governor considers + * there's no initial frequency set. And it always requests to set + * to max. frequency. + */ +- stat->current_frequency = clki->curr_freq; ++ if (hba->use_pm_opp) { ++ stat->current_frequency = hba->clk_scaling.target_freq; ++ } else { ++ struct list_head *clk_list = &hba->clk_list_head; ++ struct ufs_clk_info *clki; ++ ++ clki = list_first_entry(clk_list, struct ufs_clk_info, list); ++ stat->current_frequency = clki->curr_freq; ++ } ++ + if (scaling->is_busy_started) + scaling->tot_busy_t += ktime_us_delta(curr_t, + scaling->busy_start_t); +- + stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); + stat->busy_time = scaling->tot_busy_t; + start_window: +@@ -1516,9 +1576,11 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba) + if (list_empty(clk_list)) + return 0; + +- clki = list_first_entry(clk_list, struct ufs_clk_info, list); +- dev_pm_opp_add(hba->dev, clki->min_freq, 0); +- dev_pm_opp_add(hba->dev, clki->max_freq, 0); ++ if (!hba->use_pm_opp) { ++ clki = list_first_entry(clk_list, struct ufs_clk_info, list); ++ dev_pm_opp_add(hba->dev, clki->min_freq, 0); ++ dev_pm_opp_add(hba->dev, clki->max_freq, 0); ++ } + + ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, + &hba->vps->ondemand_data); +@@ -1530,8 +1592,10 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba) + ret = PTR_ERR(devfreq); + dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); + +- dev_pm_opp_remove(hba->dev, clki->min_freq); +- dev_pm_opp_remove(hba->dev, clki->max_freq); ++ if (!hba->use_pm_opp) { ++ dev_pm_opp_remove(hba->dev, clki->min_freq); ++ dev_pm_opp_remove(hba->dev, clki->max_freq); ++ } + return ret; + } + +@@ -1543,7 +1607,6 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba) + static void ufshcd_devfreq_remove(struct ufs_hba *hba) + { + struct list_head *clk_list = &hba->clk_list_head; +- struct ufs_clk_info *clki; + + if (!hba->devfreq) + return; +@@ -1551,19 +1614,13 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba) + devfreq_remove_device(hba->devfreq); + hba->devfreq = NULL; + +- clki = list_first_entry(clk_list, struct ufs_clk_info, list); +- dev_pm_opp_remove(hba->dev, clki->min_freq); +- dev_pm_opp_remove(hba->dev, clki->max_freq); +-} ++ if (!hba->use_pm_opp) { ++ struct ufs_clk_info *clki; + +-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) +-{ +- unsigned long flags; +- +- devfreq_suspend_device(hba->devfreq); +- spin_lock_irqsave(hba->host->host_lock, flags); +- hba->clk_scaling.window_start_t = 0; +- spin_unlock_irqrestore(hba->host->host_lock, flags); ++ clki = list_first_entry(clk_list, struct ufs_clk_info, list); ++ dev_pm_opp_remove(hba->dev, clki->min_freq); ++ dev_pm_opp_remove(hba->dev, clki->max_freq); ++ } + } + + static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) +@@ -1578,11 +1635,12 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) + if 
(!hba->clk_scaling.is_suspended) { + suspend = true; + hba->clk_scaling.is_suspended = true; ++ hba->clk_scaling.window_start_t = 0; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (suspend) +- __ufshcd_suspend_clkscaling(hba); ++ devfreq_suspend_device(hba->devfreq); + } + + static void ufshcd_resume_clkscaling(struct ufs_hba *hba) +@@ -1638,7 +1696,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev, + ufshcd_resume_clkscaling(hba); + } else { + ufshcd_suspend_clkscaling(hba); +- err = ufshcd_devfreq_scale(hba, true); ++ err = ufshcd_devfreq_scale(hba, ULONG_MAX, true); + if (err) + dev_err(hba->dev, "%s: failed to scale clocks up %d\n", + __func__, err); +@@ -7722,7 +7780,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) + hba->silence_err_logs = false; + + /* scale up clocks to max frequency before full reinitialization */ +- ufshcd_scale_clks(hba, true); ++ if (ufshcd_is_clkscaling_supported(hba)) ++ ufshcd_scale_clks(hba, ULONG_MAX, true); + + err = ufshcd_hba_enable(hba); + +@@ -9360,6 +9419,17 @@ static int ufshcd_init_clocks(struct ufs_hba *hba) + dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } ++ ++ /* Set Max. frequency for all clocks */ ++ if (hba->use_pm_opp) { ++ ret = ufshcd_opp_set_rate(hba, ULONG_MAX); ++ if (ret) { ++ dev_err(hba->dev, "%s: failed to set OPP: %d", __func__, ++ ret); ++ goto out; ++ } ++ } ++ + out: + return ret; + } +diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c +index 080a3f17a35dd7..3b17d9e4b07d8c 100644 +--- a/drivers/usb/cdns3/cdnsp-ring.c ++++ b/drivers/usb/cdns3/cdnsp-ring.c +@@ -772,7 +772,9 @@ static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id) + } + + if (port_id != old_port) { +- cdnsp_disable_slot(pdev); ++ if (pdev->slot_id) ++ cdnsp_disable_slot(pdev); ++ + pdev->active_port = port; + cdnsp_enable_slot(pdev); + } +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c +index f2ae5f4c58283a..0bee561420af29 100644 +--- a/drivers/usb/chipidea/udc.c ++++ b/drivers/usb/chipidea/udc.c +@@ -2213,6 +2213,10 @@ static void udc_suspend(struct ci_hdrc *ci) + */ + if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0) + hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0); ++ ++ if (ci->gadget.connected && ++ (!ci->suspended || !device_may_wakeup(ci->dev))) ++ usb_gadget_disconnect(&ci->gadget); + } + + static void udc_resume(struct ci_hdrc *ci, bool power_lost) +@@ -2223,6 +2227,9 @@ static void udc_resume(struct ci_hdrc *ci, bool power_lost) + OTGSC_BSVIS | OTGSC_BSVIE); + if (ci->vbus_active) + usb_gadget_vbus_disconnect(&ci->gadget); ++ } else if (ci->vbus_active && ci->driver && ++ !ci->gadget.connected) { ++ usb_gadget_connect(&ci->gadget); + } + + /* Restore value 0 if it was set for power lost check */ +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index c979ecd0169a2d..46db600fdd824e 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -227,7 +227,8 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech HD Webcam C270 */ +- { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, ++ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME | ++ USB_QUIRK_NO_LPM}, + + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, +diff --git a/drivers/usb/host/xhci-dbgcap.c 
b/drivers/usb/host/xhci-dbgcap.c +index fab9e6be4e27ae..2cd8c757c65342 100644 +--- a/drivers/usb/host/xhci-dbgcap.c ++++ b/drivers/usb/host/xhci-dbgcap.c +@@ -639,6 +639,10 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) + case DS_DISABLED: + return; + case DS_CONFIGURED: ++ spin_lock(&dbc->lock); ++ xhci_dbc_flush_requests(dbc); ++ spin_unlock(&dbc->lock); ++ + if (dbc->driver->disconnect) + dbc->driver->disconnect(dbc); + break; +diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c +index 0266c2f5bc0d8e..aa689fbd3dce67 100644 +--- a/drivers/usb/host/xhci-dbgtty.c ++++ b/drivers/usb/host/xhci-dbgtty.c +@@ -585,6 +585,7 @@ int dbc_tty_init(void) + dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL; + dbc_tty_driver->init_termios = tty_std_termios; ++ dbc_tty_driver->init_termios.c_lflag &= ~ECHO; + dbc_tty_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + dbc_tty_driver->init_termios.c_ispeed = 9600; +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 22cca89efbfd72..cceb69d4f61e1c 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1436,6 +1436,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, + /* Periodic endpoint bInterval limit quirk */ + if (usb_endpoint_xfer_int(&ep->desc) || + usb_endpoint_xfer_isoc(&ep->desc)) { ++ if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) && ++ interval >= 9) { ++ interval = 8; ++ } + if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && + udev->speed >= USB_SPEED_HIGH && + interval >= 7) { +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index c1a172b6feae84..5abc48f148dcbc 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -65,12 +65,22 @@ + #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed + #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed + ++#define PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI 0x13ed ++#define PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI 0x13ee ++#define PCI_DEVICE_ID_AMD_STARSHIP_XHCI 0x148c ++#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI 0x15d4 ++#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI 0x15d5 ++#define PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI 0x15e0 ++#define PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI 0x15e1 ++#define PCI_DEVICE_ID_AMD_RAVEN2_XHCI 0x15e5 + #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba + #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb + #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc + ++#define PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI 0x7316 ++ + #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 + #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 +@@ -348,6 +358,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + if (pdev->vendor == PCI_VENDOR_ID_NEC) + xhci->quirks |= XHCI_NEC_HOST; + ++ if (pdev->vendor == PCI_VENDOR_ID_AMD && ++ (pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI || ++ pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI || ++ pdev->device == PCI_DEVICE_ID_AMD_STARSHIP_XHCI || ++ pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI || ++ pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI || ++ pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI || ++ pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI || ++ pdev->device == PCI_DEVICE_ID_AMD_RAVEN2_XHCI)) ++ xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; ++ ++ if (pdev->vendor == PCI_VENDOR_ID_ATI 
&& ++ pdev->device == PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI) ++ xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; ++ + if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) + xhci->quirks |= XHCI_AMD_0x96_HOST; + +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +index 8832e0cedadaff..749ba3596c2b3f 100644 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -313,7 +313,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s + } + + usb3_hcd = xhci_get_usb3_hcd(xhci); +- if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4) ++ if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 && ++ !(xhci->quirks & XHCI_BROKEN_STREAMS)) + usb3_hcd->can_do_streams = 1; + + if (xhci->shared_hcd) { +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 74bdd035d756a4..159cdfc7129070 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1659,6 +1659,7 @@ struct xhci_hcd { + #define XHCI_WRITE_64_HI_LO BIT_ULL(47) + #define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) + #define XHCI_ETRON_HOST BIT_ULL(49) ++#define XHCI_LIMIT_ENDPOINT_INTERVAL_9 BIT_ULL(50) + + unsigned int num_active_eps; + unsigned int limit_active_eps; +diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c +index 5f6fc5b79212ef..7eb78885fa2b3a 100644 +--- a/drivers/usb/typec/altmodes/displayport.c ++++ b/drivers/usb/typec/altmodes/displayport.c +@@ -324,8 +324,7 @@ static int dp_altmode_vdm(struct typec_altmode *alt, + case CMDT_RSP_NAK: + switch (cmd) { + case DP_CMD_STATUS_UPDATE: +- if (typec_altmode_exit(alt)) +- dev_err(&dp->alt->dev, "Exit Mode Failed!\n"); ++ dp->state = DP_STATE_EXIT; + break; + case DP_CMD_CONFIGURE: + dp->data.conf = 0; +@@ -528,7 +527,7 @@ static ssize_t pin_assignment_show(struct device *dev, + + assignments = get_current_pin_assignments(dp); + +- for (i = 0; assignments; assignments >>= 1, i++) { ++ for (i = 0; assignments && i < DP_PIN_ASSIGN_MAX; assignments >>= 1, i++) { + if (assignments & 1) { + if (i == cur) + len += sprintf(buf + len, "[%s] ", +diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c +index 24192a7667edf7..a25766e90f0a6e 100644 +--- a/fs/anon_inodes.c ++++ b/fs/anon_inodes.c +@@ -55,15 +55,26 @@ static struct file_system_type anon_inode_fs_type = { + .kill_sb = kill_anon_super, + }; + +-static struct inode *anon_inode_make_secure_inode( +- const char *name, +- const struct inode *context_inode) ++/** ++ * anon_inode_make_secure_inode - allocate an anonymous inode with security context ++ * @sb: [in] Superblock to allocate from ++ * @name: [in] Name of the class of the newfile (e.g., "secretmem") ++ * @context_inode: ++ * [in] Optional parent inode for security inheritance ++ * ++ * The function ensures proper security initialization through the LSM hook ++ * security_inode_init_security_anon(). ++ * ++ * Return: Pointer to new inode on success, ERR_PTR on failure. 
++ */ ++struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name, ++ const struct inode *context_inode) + { + struct inode *inode; + const struct qstr qname = QSTR_INIT(name, strlen(name)); + int error; + +- inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); ++ inode = alloc_anon_inode(sb); + if (IS_ERR(inode)) + return inode; + inode->i_flags &= ~S_PRIVATE; +@@ -74,6 +85,7 @@ static struct inode *anon_inode_make_secure_inode( + } + return inode; + } ++EXPORT_SYMBOL_GPL_FOR_MODULES(anon_inode_make_secure_inode, "kvm"); + + static struct file *__anon_inode_getfile(const char *name, + const struct file_operations *fops, +@@ -88,7 +100,8 @@ static struct file *__anon_inode_getfile(const char *name, + return ERR_PTR(-ENOENT); + + if (secure) { +- inode = anon_inode_make_secure_inode(name, context_inode); ++ inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb, ++ name, context_inode); + if (IS_ERR(inode)) { + file = ERR_CAST(inode); + goto err; +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index af1f22b3cff7dc..e8e57abb032d7a 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -4615,9 +4615,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) + { + struct inode *inode = d_inode(dentry); + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; +- int err = 0; ++ int ret = 0; + struct btrfs_trans_handle *trans; +- u64 last_unlink_trans; + struct fscrypt_name fname; + + if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) +@@ -4631,55 +4630,56 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) + return btrfs_delete_subvolume(BTRFS_I(dir), dentry); + } + +- err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); +- if (err) +- return err; ++ ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); ++ if (ret) ++ return ret; + + /* This needs to handle no-key deletions later on */ + + trans = __unlink_start_trans(BTRFS_I(dir)); + if (IS_ERR(trans)) { +- err = PTR_ERR(trans); ++ ret = PTR_ERR(trans); + goto out_notrans; + } + ++ /* ++ * Propagate the last_unlink_trans value of the deleted dir to its ++ * parent directory. This is to prevent an unrecoverable log tree in the ++ * case we do something like this: ++ * 1) create dir foo ++ * 2) create snapshot under dir foo ++ * 3) delete the snapshot ++ * 4) rmdir foo ++ * 5) mkdir foo ++ * 6) fsync foo or some file inside foo ++ * ++ * This is because we can't unlink other roots when replaying the dir ++ * deletes for directory foo. ++ */ ++ if (BTRFS_I(inode)->last_unlink_trans >= trans->transid) ++ btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); ++ + if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { +- err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); ++ ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry); + goto out; + } + +- err = btrfs_orphan_add(trans, BTRFS_I(inode)); +- if (err) ++ ret = btrfs_orphan_add(trans, BTRFS_I(inode)); ++ if (ret) + goto out; + +- last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; +- + /* now the directory is empty */ +- err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), ++ ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), + &fname.disk_name); +- if (!err) { ++ if (!ret) + btrfs_i_size_write(BTRFS_I(inode), 0); +- /* +- * Propagate the last_unlink_trans value of the deleted dir to +- * its parent directory. 
This is to prevent an unrecoverable +- * log tree in the case we do something like this: +- * 1) create dir foo +- * 2) create snapshot under dir foo +- * 3) delete the snapshot +- * 4) rmdir foo +- * 5) mkdir foo +- * 6) fsync foo or some file inside foo +- */ +- if (last_unlink_trans >= trans->transid) +- BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; +- } + out: + btrfs_end_transaction(trans); + out_notrans: + btrfs_btree_balance_dirty(fs_info); + fscrypt_free_filename(&fname); + +- return err; ++ return ret; + } + + /* +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c +index 86d846eb5ed492..c68e9ecbc438cc 100644 +--- a/fs/btrfs/ordered-data.c ++++ b/fs/btrfs/ordered-data.c +@@ -154,9 +154,10 @@ static struct btrfs_ordered_extent *alloc_ordered_extent( + struct btrfs_ordered_extent *entry; + int ret; + u64 qgroup_rsv = 0; ++ const bool is_nocow = (flags & ++ ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC))); + +- if (flags & +- ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) { ++ if (is_nocow) { + /* For nocow write, we can release the qgroup rsv right now */ + ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv); + if (ret < 0) +@@ -171,8 +172,13 @@ static struct btrfs_ordered_extent *alloc_ordered_extent( + return ERR_PTR(ret); + } + entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); +- if (!entry) ++ if (!entry) { ++ if (!is_nocow) ++ btrfs_qgroup_free_refroot(inode->root->fs_info, ++ btrfs_root_id(inode->root), ++ qgroup_rsv, BTRFS_QGROUP_RSV_DATA); + return ERR_PTR(-ENOMEM); ++ } + + entry->file_offset = file_offset; + entry->num_bytes = num_bytes; +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index cc9a2f8a4ae3b7..13377c3b22897d 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -1087,7 +1087,9 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, + search_key.type = BTRFS_INODE_REF_KEY; + search_key.offset = parent_objectid; + ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); +- if (ret == 0) { ++ if (ret < 0) { ++ return ret; ++ } else if (ret == 0) { + struct btrfs_inode_ref *victim_ref; + unsigned long ptr; + unsigned long ptr_end; +@@ -1160,13 +1162,13 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, + struct fscrypt_str victim_name; + + extref = (struct btrfs_inode_extref *)(base + cur_offset); ++ victim_name.len = btrfs_inode_extref_name_len(leaf, extref); + + if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) + goto next; + + ret = read_alloc_one_name(leaf, &extref->name, +- btrfs_inode_extref_name_len(leaf, extref), +- &victim_name); ++ victim_name.len, &victim_name); + if (ret) + return ret; + +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index ae129044c52f42..8f0cb7c7eedeb4 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -36,9 +36,21 @@ + #include + #include + ++static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size) ++{ ++ loff_t old_size = i_size_read(inode); ++ ++ if (old_size >= new_size) ++ return; ++ ++ /* zero or drop pages only in range of [old_size, new_size] */ ++ truncate_pagecache(inode, old_size); ++} ++ + static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) + { + struct inode *inode = file_inode(vmf->vma->vm_file); ++ vm_flags_t flags = vmf->vma->vm_flags; + vm_fault_t ret; + + ret = filemap_fault(vmf); +@@ -46,47 +58,50 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) + f2fs_update_iostat(F2FS_I_SB(inode), inode, + APP_MAPPED_READ_IO, 
F2FS_BLKSIZE); + +- trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret); ++ trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret); + + return ret; + } + + static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) + { +- struct page *page = vmf->page; ++ struct folio *folio = page_folio(vmf->page); + struct inode *inode = file_inode(vmf->vma->vm_file); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct dnode_of_data dn; +- bool need_alloc = true; ++ bool need_alloc = !f2fs_is_pinned_file(inode); + int err = 0; ++ vm_fault_t ret; + + if (unlikely(IS_IMMUTABLE(inode))) + return VM_FAULT_SIGBUS; + +- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) +- return VM_FAULT_SIGBUS; ++ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { ++ err = -EIO; ++ goto out; ++ } + + if (unlikely(f2fs_cp_error(sbi))) { + err = -EIO; +- goto err; ++ goto out; + } + + if (!f2fs_is_checkpoint_ready(sbi)) { + err = -ENOSPC; +- goto err; ++ goto out; + } + + err = f2fs_convert_inline_inode(inode); + if (err) +- goto err; ++ goto out; + + #ifdef CONFIG_F2FS_FS_COMPRESSION + if (f2fs_compressed_file(inode)) { +- int ret = f2fs_is_compressed_cluster(inode, page->index); ++ int ret = f2fs_is_compressed_cluster(inode, folio->index); + + if (ret < 0) { + err = ret; +- goto err; ++ goto out; + } else if (ret) { + need_alloc = false; + } +@@ -100,36 +115,40 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) + + f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); + ++ filemap_invalidate_lock(inode->i_mapping); ++ f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT); ++ filemap_invalidate_unlock(inode->i_mapping); ++ + file_update_time(vmf->vma->vm_file); + filemap_invalidate_lock_shared(inode->i_mapping); +- lock_page(page); +- if (unlikely(page->mapping != inode->i_mapping || +- page_offset(page) > i_size_read(inode) || +- !PageUptodate(page))) { +- unlock_page(page); ++ ++ folio_lock(folio); ++ if (unlikely(folio->mapping != inode->i_mapping || ++ folio_pos(folio) > i_size_read(inode) || ++ !folio_test_uptodate(folio))) { ++ folio_unlock(folio); + err = -EFAULT; + goto out_sem; + } + ++ set_new_dnode(&dn, inode, NULL, NULL, 0); + if (need_alloc) { + /* block allocation */ +- set_new_dnode(&dn, inode, NULL, NULL, 0); +- err = f2fs_get_block_locked(&dn, page->index); +- } +- +-#ifdef CONFIG_F2FS_FS_COMPRESSION +- if (!need_alloc) { +- set_new_dnode(&dn, inode, NULL, NULL, 0); +- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); ++ err = f2fs_get_block_locked(&dn, folio->index); ++ } else { ++ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE); + f2fs_put_dnode(&dn); ++ if (f2fs_is_pinned_file(inode) && ++ !__is_valid_data_blkaddr(dn.data_blkaddr)) ++ err = -EIO; + } +-#endif ++ + if (err) { +- unlock_page(page); ++ folio_unlock(folio); + goto out_sem; + } + +- f2fs_wait_on_page_writeback(page, DATA, false, true); ++ f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true); + + /* wait for GCed page writeback via META_MAPPING */ + f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); +@@ -137,29 +156,31 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) + /* + * check to see if the page is mapped already (no holes) + */ +- if (PageMappedToDisk(page)) ++ if (folio_test_mappedtodisk(folio)) + goto out_sem; + + /* page is wholly or partially inside EOF */ +- if (((loff_t)(page->index + 1) << PAGE_SHIFT) > ++ if (((loff_t)(folio->index + 1) << PAGE_SHIFT) > + i_size_read(inode)) { + loff_t offset; + + offset = i_size_read(inode) & 
~PAGE_MASK; +- zero_user_segment(page, offset, PAGE_SIZE); ++ folio_zero_segment(folio, offset, folio_size(folio)); + } +- set_page_dirty(page); ++ folio_mark_dirty(folio); + + f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE); + f2fs_update_time(sbi, REQ_TIME); + +- trace_f2fs_vm_page_mkwrite(page, DATA); + out_sem: + filemap_invalidate_unlock_shared(inode->i_mapping); + + sb_end_pagefault(inode->i_sb); +-err: +- return vmf_fs_error(err); ++out: ++ ret = vmf_fs_error(err); ++ ++ trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret); ++ return ret; + } + + static const struct vm_operations_struct f2fs_file_vm_ops = { +@@ -1047,6 +1068,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + filemap_invalidate_lock(inode->i_mapping); + ++ if (attr->ia_size > old_size) ++ f2fs_zero_post_eof_page(inode, attr->ia_size); + truncate_setsize(inode, attr->ia_size); + + if (attr->ia_size <= old_size) +@@ -1165,6 +1188,10 @@ static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len) + if (ret) + return ret; + ++ filemap_invalidate_lock(inode->i_mapping); ++ f2fs_zero_post_eof_page(inode, offset + len); ++ filemap_invalidate_unlock(inode->i_mapping); ++ + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; + +@@ -1449,6 +1476,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + filemap_invalidate_lock(inode->i_mapping); + ++ f2fs_zero_post_eof_page(inode, offset + len); ++ + f2fs_lock_op(sbi); + f2fs_drop_extent_tree(inode); + truncate_pagecache(inode, offset); +@@ -1571,6 +1600,10 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, + if (ret) + return ret; + ++ filemap_invalidate_lock(mapping); ++ f2fs_zero_post_eof_page(inode, offset + len); ++ filemap_invalidate_unlock(mapping); ++ + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; + +@@ -1702,6 +1735,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) + /* avoid gc operation during block exchange */ + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + filemap_invalidate_lock(mapping); ++ ++ f2fs_zero_post_eof_page(inode, offset + len); + truncate_pagecache(inode, offset); + + while (!ret && idx > pg_start) { +@@ -1757,6 +1792,10 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset, + if (err) + return err; + ++ filemap_invalidate_lock(inode->i_mapping); ++ f2fs_zero_post_eof_page(inode, offset + len); ++ filemap_invalidate_unlock(inode->i_mapping); ++ + f2fs_balance_fs(sbi, true); + + pg_start = ((unsigned long long)offset) >> PAGE_SHIFT; +@@ -3327,7 +3366,7 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) + goto done; + } + +- if (f2fs_sb_has_blkzoned(sbi) && F2FS_HAS_BLOCKS(inode)) { ++ if (F2FS_HAS_BLOCKS(inode)) { + ret = -EFBIG; + goto out; + } +@@ -4670,6 +4709,10 @@ static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from) + err = file_modified(file); + if (err) + return err; ++ ++ filemap_invalidate_lock(inode->i_mapping); ++ f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from)); ++ filemap_invalidate_unlock(inode->i_mapping); + return count; + } + +@@ -4914,6 +4957,8 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) + bool dio; + bool 
may_need_sync = true; + int preallocated; ++ const loff_t pos = iocb->ki_pos; ++ const ssize_t count = iov_iter_count(from); + ssize_t ret; + + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { +@@ -4935,6 +4980,12 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) + inode_lock(inode); + } + ++ if (f2fs_is_pinned_file(inode) && ++ !f2fs_overwrite_io(inode, pos, count)) { ++ ret = -EIO; ++ goto out_unlock; ++ } ++ + ret = f2fs_write_checks(iocb, from); + if (ret <= 0) + goto out_unlock; +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index 0bc537de1b2958..0a26444fe20233 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -1096,6 +1096,7 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr) + } + + static int ff_layout_async_handle_error_v4(struct rpc_task *task, ++ u32 op_status, + struct nfs4_state *state, + struct nfs_client *clp, + struct pnfs_layout_segment *lseg, +@@ -1106,32 +1107,42 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; + +- switch (task->tk_status) { +- case -NFS4ERR_BADSESSION: +- case -NFS4ERR_BADSLOT: +- case -NFS4ERR_BAD_HIGH_SLOT: +- case -NFS4ERR_DEADSESSION: +- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: +- case -NFS4ERR_SEQ_FALSE_RETRY: +- case -NFS4ERR_SEQ_MISORDERED: ++ switch (op_status) { ++ case NFS4_OK: ++ case NFS4ERR_NXIO: ++ break; ++ case NFSERR_PERM: ++ if (!task->tk_xprt) ++ break; ++ xprt_force_disconnect(task->tk_xprt); ++ goto out_retry; ++ case NFS4ERR_BADSESSION: ++ case NFS4ERR_BADSLOT: ++ case NFS4ERR_BAD_HIGH_SLOT: ++ case NFS4ERR_DEADSESSION: ++ case NFS4ERR_CONN_NOT_BOUND_TO_SESSION: ++ case NFS4ERR_SEQ_FALSE_RETRY: ++ case NFS4ERR_SEQ_MISORDERED: + dprintk("%s ERROR %d, Reset session. 
Exchangeid " + "flags 0x%x\n", __func__, task->tk_status, + clp->cl_exchange_flags); + nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); +- break; +- case -NFS4ERR_DELAY: +- case -NFS4ERR_GRACE: ++ goto out_retry; ++ case NFS4ERR_DELAY: ++ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); ++ fallthrough; ++ case NFS4ERR_GRACE: + rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); +- break; +- case -NFS4ERR_RETRY_UNCACHED_REP: +- break; ++ goto out_retry; ++ case NFS4ERR_RETRY_UNCACHED_REP: ++ goto out_retry; + /* Invalidate Layout errors */ +- case -NFS4ERR_PNFS_NO_LAYOUT: +- case -ESTALE: /* mapped NFS4ERR_STALE */ +- case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ +- case -EISDIR: /* mapped NFS4ERR_ISDIR */ +- case -NFS4ERR_FHEXPIRED: +- case -NFS4ERR_WRONG_TYPE: ++ case NFS4ERR_PNFS_NO_LAYOUT: ++ case NFS4ERR_STALE: ++ case NFS4ERR_BADHANDLE: ++ case NFS4ERR_ISDIR: ++ case NFS4ERR_FHEXPIRED: ++ case NFS4ERR_WRONG_TYPE: + dprintk("%s Invalid layout error %d\n", __func__, + task->tk_status); + /* +@@ -1144,6 +1155,11 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, + pnfs_destroy_layout(NFS_I(inode)); + rpc_wake_up(&tbl->slot_tbl_waitq); + goto reset; ++ default: ++ break; ++ } ++ ++ switch (task->tk_status) { + /* RPC connection errors */ + case -ECONNREFUSED: + case -EHOSTDOWN: +@@ -1159,26 +1175,56 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, + nfs4_delete_deviceid(devid->ld, devid->nfs_client, + &devid->deviceid); + rpc_wake_up(&tbl->slot_tbl_waitq); +- fallthrough; ++ break; + default: +- if (ff_layout_avoid_mds_available_ds(lseg)) +- return -NFS4ERR_RESET_TO_PNFS; +-reset: +- dprintk("%s Retry through MDS. Error %d\n", __func__, +- task->tk_status); +- return -NFS4ERR_RESET_TO_MDS; ++ break; + } ++ ++ if (ff_layout_avoid_mds_available_ds(lseg)) ++ return -NFS4ERR_RESET_TO_PNFS; ++reset: ++ dprintk("%s Retry through MDS. Error %d\n", __func__, ++ task->tk_status); ++ return -NFS4ERR_RESET_TO_MDS; ++ ++out_retry: + task->tk_status = 0; + return -EAGAIN; + } + + /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ + static int ff_layout_async_handle_error_v3(struct rpc_task *task, ++ u32 op_status, ++ struct nfs_client *clp, + struct pnfs_layout_segment *lseg, + u32 idx) + { + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + ++ switch (op_status) { ++ case NFS_OK: ++ case NFSERR_NXIO: ++ break; ++ case NFSERR_PERM: ++ if (!task->tk_xprt) ++ break; ++ xprt_force_disconnect(task->tk_xprt); ++ goto out_retry; ++ case NFSERR_ACCES: ++ case NFSERR_BADHANDLE: ++ case NFSERR_FBIG: ++ case NFSERR_IO: ++ case NFSERR_NOSPC: ++ case NFSERR_ROFS: ++ case NFSERR_STALE: ++ goto out_reset_to_pnfs; ++ case NFSERR_JUKEBOX: ++ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); ++ goto out_retry; ++ default: ++ break; ++ } ++ + switch (task->tk_status) { + /* File access problems. Don't mark the device as unavailable */ + case -EACCES: +@@ -1197,6 +1243,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task, + nfs4_delete_deviceid(devid->ld, devid->nfs_client, + &devid->deviceid); + } ++out_reset_to_pnfs: + /* FIXME: Need to prevent infinite looping here. 
*/ + return -NFS4ERR_RESET_TO_PNFS; + out_retry: +@@ -1207,6 +1254,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task, + } + + static int ff_layout_async_handle_error(struct rpc_task *task, ++ u32 op_status, + struct nfs4_state *state, + struct nfs_client *clp, + struct pnfs_layout_segment *lseg, +@@ -1225,10 +1273,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task, + + switch (vers) { + case 3: +- return ff_layout_async_handle_error_v3(task, lseg, idx); +- case 4: +- return ff_layout_async_handle_error_v4(task, state, clp, ++ return ff_layout_async_handle_error_v3(task, op_status, clp, + lseg, idx); ++ case 4: ++ return ff_layout_async_handle_error_v4(task, op_status, state, ++ clp, lseg, idx); + default: + /* should never happen */ + WARN_ON_ONCE(1); +@@ -1281,6 +1330,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, + switch (status) { + case NFS4ERR_DELAY: + case NFS4ERR_GRACE: ++ case NFS4ERR_PERM: + break; + case NFS4ERR_NXIO: + ff_layout_mark_ds_unreachable(lseg, idx); +@@ -1313,7 +1363,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task, + trace_ff_layout_read_error(hdr); + } + +- err = ff_layout_async_handle_error(task, hdr->args.context->state, ++ err = ff_layout_async_handle_error(task, hdr->res.op_status, ++ hdr->args.context->state, + hdr->ds_clp, hdr->lseg, + hdr->pgio_mirror_idx); + +@@ -1483,7 +1534,8 @@ static int ff_layout_write_done_cb(struct rpc_task *task, + trace_ff_layout_write_error(hdr); + } + +- err = ff_layout_async_handle_error(task, hdr->args.context->state, ++ err = ff_layout_async_handle_error(task, hdr->res.op_status, ++ hdr->args.context->state, + hdr->ds_clp, hdr->lseg, + hdr->pgio_mirror_idx); + +@@ -1529,8 +1581,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task, + trace_ff_layout_commit_error(data); + } + +- err = ff_layout_async_handle_error(task, NULL, data->ds_clp, +- data->lseg, data->ds_commit_index); ++ err = ff_layout_async_handle_error(task, data->res.op_status, ++ NULL, data->ds_clp, data->lseg, ++ data->ds_commit_index); + + trace_nfs4_pnfs_commit_ds(data, err); + switch (err) { +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 419d98cf9e29f1..7e7dd2aab449dd 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -2442,15 +2442,26 @@ EXPORT_SYMBOL_GPL(nfs_net_id); + static int nfs_net_init(struct net *net) + { + struct nfs_net *nn = net_generic(net, nfs_net_id); ++ int err; + + nfs_clients_init(net); + + if (!rpc_proc_register(net, &nn->rpcstats)) { +- nfs_clients_exit(net); +- return -ENOMEM; ++ err = -ENOMEM; ++ goto err_proc_rpc; + } + +- return nfs_fs_proc_net_init(net); ++ err = nfs_fs_proc_net_init(net); ++ if (err) ++ goto err_proc_nfs; ++ ++ return 0; ++ ++err_proc_nfs: ++ rpc_proc_unregister(net, "nfs"); ++err_proc_rpc: ++ nfs_clients_exit(net); ++ return err; + } + + static void nfs_net_exit(struct net *net) +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 73aa5a63afe3fb..79d1ffdcbebd3d 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -1930,8 +1930,10 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) + static void nfs_layoutget_end(struct pnfs_layout_hdr *lo) + { + if (atomic_dec_and_test(&lo->plh_outstanding) && +- test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) ++ test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) { ++ smp_mb__after_atomic(); + wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN); ++ } + } + + static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo) +diff --git a/fs/smb/client/cifsglob.h 
b/fs/smb/client/cifsglob.h +index d776340ad91ce6..5c856adf7be9ec 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -743,6 +743,7 @@ struct TCP_Server_Info { + __le32 session_key_id; /* retrieved from negotiate response and send in session setup request */ + struct session_key session_key; + unsigned long lstrp; /* when we got last response from this server */ ++ unsigned long neg_start; /* when negotiate started (jiffies) */ + struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */ + #define CIFS_NEGFLAVOR_UNENCAP 1 /* wct == 17, but no ext_sec */ + #define CIFS_NEGFLAVOR_EXTENDED 2 /* wct == 17, ext_sec bit set */ +@@ -1268,6 +1269,7 @@ struct cifs_tcon { + bool use_persistent:1; /* use persistent instead of durable handles */ + bool no_lease:1; /* Do not request leases on files or directories */ + bool use_witness:1; /* use witness protocol */ ++ bool dummy:1; /* dummy tcon used for reconnecting channels */ + __le32 capabilities; + __u32 share_flags; + __u32 maximal_access; +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index 454420aa02220f..8298d1745f9b9c 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -677,12 +677,12 @@ server_unresponsive(struct TCP_Server_Info *server) + /* + * If we're in the process of mounting a share or reconnecting a session + * and the server abruptly shut down (e.g. socket wasn't closed, packet +- * had been ACK'ed but no SMB response), don't wait longer than 20s to +- * negotiate protocol. ++ * had been ACK'ed but no SMB response), don't wait longer than 20s from ++ * when negotiate actually started. + */ + spin_lock(&server->srv_lock); + if (server->tcpStatus == CifsInNegotiate && +- time_after(jiffies, server->lstrp + 20 * HZ)) { ++ time_after(jiffies, server->neg_start + 20 * HZ)) { + spin_unlock(&server->srv_lock); + cifs_reconnect(server, false); + return true; +@@ -3998,6 +3998,7 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, + + server->lstrp = jiffies; + server->tcpStatus = CifsInNegotiate; ++ server->neg_start = jiffies; + spin_unlock(&server->srv_lock); + + rc = server->ops->negotiate(xid, ses, server); +diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c +index 222348ae625866..0be16f8acd9af5 100644 +--- a/fs/smb/client/readdir.c ++++ b/fs/smb/client/readdir.c +@@ -263,7 +263,7 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info, + /* The Mode field in the response can now include the file type as well */ + fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode), + fattr->cf_cifsattrs & ATTR_DIRECTORY); +- fattr->cf_dtype = S_DT(le32_to_cpu(info->Mode)); ++ fattr->cf_dtype = S_DT(fattr->cf_mode); + + switch (fattr->cf_mode & S_IFMT) { + case S_IFLNK: +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index e0f58600933059..357abb0170c495 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -437,9 +437,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, + free_xid(xid); + ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES; + +- /* regardless of rc value, setup polling */ +- queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, +- (SMB_INTERFACE_POLL_INTERVAL * HZ)); ++ if (!tcon->ipc && !tcon->dummy) ++ queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, ++ (SMB_INTERFACE_POLL_INTERVAL * HZ)); + + mutex_unlock(&ses->session_mutex); + +@@ -4228,10 +4228,8 @@ void smb2_reconnect_server(struct work_struct *work) + } + goto done; + } +- + tcon->status 
= TID_GOOD; +- tcon->retry = false; +- tcon->need_reconnect = false; ++ tcon->dummy = true; + + /* now reconnect sessions for necessary channels */ + list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) { +diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h +index 563cb4d8edf0c3..4dfdc521c5c985 100644 +--- a/fs/smb/client/trace.h ++++ b/fs/smb/client/trace.h +@@ -114,7 +114,7 @@ DECLARE_EVENT_CLASS(smb3_rw_err_class, + __entry->len = len; + __entry->rc = rc; + ), +- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d", ++ TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d", + __entry->xid, __entry->sesid, __entry->tid, __entry->fid, + __entry->offset, __entry->len, __entry->rc) + ) +@@ -247,7 +247,7 @@ DECLARE_EVENT_CLASS(smb3_fd_class, + __entry->tid = tid; + __entry->sesid = sesid; + ), +- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx", ++ TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx", + __entry->xid, __entry->sesid, __entry->tid, __entry->fid) + ) + +@@ -286,7 +286,7 @@ DECLARE_EVENT_CLASS(smb3_fd_err_class, + __entry->sesid = sesid; + __entry->rc = rc; + ), +- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d", ++ TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d", + __entry->xid, __entry->sesid, __entry->tid, __entry->fid, + __entry->rc) + ) +@@ -558,7 +558,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_err_class, + __entry->status = status; + __entry->rc = rc; + ), +- TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d", ++ TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d", + __entry->sesid, __entry->tid, __entry->cmd, __entry->mid, + __entry->status, __entry->rc) + ) +@@ -593,7 +593,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_done_class, + __entry->cmd = cmd; + __entry->mid = mid; + ), +- TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu", ++ TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu", + __entry->sesid, __entry->tid, + __entry->cmd, __entry->mid) + ) +@@ -631,7 +631,7 @@ DECLARE_EVENT_CLASS(smb3_mid_class, + __entry->when_sent = when_sent; + __entry->when_received = when_received; + ), +- TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu", ++ TP_printk("cmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu", + __entry->cmd, __entry->mid, __entry->pid, __entry->when_sent, + __entry->when_received) + ) +@@ -662,7 +662,7 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class, + __assign_str(func_name, func_name); + __entry->rc = rc; + ), +- TP_printk("\t%s: xid=%u rc=%d", ++ TP_printk("%s: xid=%u rc=%d", + __get_str(func_name), __entry->xid, __entry->rc) + ) + +@@ -688,7 +688,7 @@ DECLARE_EVENT_CLASS(smb3_sync_err_class, + __entry->ino = ino; + __entry->rc = rc; + ), +- TP_printk("\tino=%lu rc=%d", ++ TP_printk("ino=%lu rc=%d", + __entry->ino, __entry->rc) + ) + +@@ -714,7 +714,7 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class, + __entry->xid = xid; + __assign_str(func_name, func_name); + ), +- TP_printk("\t%s: xid=%u", ++ TP_printk("%s: xid=%u", + __get_str(func_name), __entry->xid) + ) + +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 20db7fc0651f3c..6b4f9f16968821 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -79,6 +79,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev, + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_indirect_target_selection(struct device *dev, + struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf); + + extern 
__printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+diff --git a/include/linux/export.h b/include/linux/export.h
+index 9911508a9604fb..06f7a4eb649286 100644
+--- a/include/linux/export.h
++++ b/include/linux/export.h
+@@ -42,11 +42,17 @@ extern struct module __this_module;
+ .long sym
+ #endif
+
+-#define ___EXPORT_SYMBOL(sym, license, ns) \
++/*
++ * LLVM integrated assembler can merge adjacent string literals (like
++ * C and GNU-as) passed to '.ascii', but not to '.asciz' and chokes on:
++ *
++ * .asciz "MODULE_" "kvm" ;
++ */
++#define ___EXPORT_SYMBOL(sym, license, ns...) \
+ .section ".export_symbol","a" ASM_NL \
+ __export_symbol_##sym: ASM_NL \
+ .asciz license ASM_NL \
+- .asciz ns ASM_NL \
++ .ascii ns "\0" ASM_NL \
+ __EXPORT_SYMBOL_REF(sym) ASM_NL \
+ .previous
+
+@@ -88,4 +94,6 @@ extern struct module __this_module;
+ #define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
+ #define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
+
++#define EXPORT_SYMBOL_GPL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
++
+ #endif /* _LINUX_EXPORT_H */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 81edfa1e66b608..b641a01512fb09 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3170,6 +3170,8 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
+ extern const struct address_space_operations ram_aops;
+ extern int always_delete_dentry(const struct dentry *);
+ extern struct inode *alloc_anon_inode(struct super_block *);
++struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
++ const struct inode *context_inode);
+ extern int simple_nosetlease(struct file *, int, struct file_lock **, void **);
+ extern const struct dentry_operations simple_dentry_operations;
+
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 91c4e11cb6abb4..285d709cbbde4d 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -1305,7 +1305,7 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
+ int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
+ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm);
+-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
++int ata_acpi_cbl_pata_type(struct ata_port *ap);
+ #else
+ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
+ {
+@@ -1330,10 +1330,9 @@ static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ return 0;
+ }
+
+-static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
+- const struct ata_acpi_gtm *gtm)
++static inline int ata_acpi_cbl_pata_type(struct ata_port *ap)
+ {
+- return 0;
++ return ATA_CBL_PATA40;
+ }
+ #endif
+
+diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
+index 8d09c2f0a9b807..c3f08af20295ca 100644
+--- a/include/linux/usb/typec_dp.h
++++ b/include/linux/usb/typec_dp.h
+@@ -56,6 +56,7 @@ enum {
+ DP_PIN_ASSIGN_D,
+ DP_PIN_ASSIGN_E,
+ DP_PIN_ASSIGN_F, /* Not supported after v1.0b */
++ DP_PIN_ASSIGN_MAX,
+ };
+
+ /* DisplayPort alt mode specific commands */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index d63af08c6cdc2b..4f067599e6e9e0 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -29,6 +29,7 @@
+ #include <linux/idr.h>
+ #include <linux/leds.h>
+ #include <linux/rculist.h>
++#include <linux/srcu.h>
+
+ #include <net/bluetooth/hci.h>
+ #include
+@@ -339,6 
+340,7 @@ struct adv_monitor { + + struct hci_dev { + struct list_head list; ++ struct srcu_struct srcu; + struct mutex lock; + + struct ida unset_handle_ida; +diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h +index b6ffae01a8cd86..f2ce7f6da87975 100644 +--- a/include/trace/events/f2fs.h ++++ b/include/trace/events/f2fs.h +@@ -1284,13 +1284,6 @@ DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty, + TP_ARGS(page, type) + ); + +-DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite, +- +- TP_PROTO(struct page *page, int type), +- +- TP_ARGS(page, type) +-); +- + TRACE_EVENT(f2fs_replace_atomic_write_block, + + TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index, +@@ -1328,30 +1321,50 @@ TRACE_EVENT(f2fs_replace_atomic_write_block, + __entry->recovery) + ); + +-TRACE_EVENT(f2fs_filemap_fault, ++DECLARE_EVENT_CLASS(f2fs_mmap, + +- TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret), ++ TP_PROTO(struct inode *inode, pgoff_t index, ++ vm_flags_t flags, vm_fault_t ret), + +- TP_ARGS(inode, index, ret), ++ TP_ARGS(inode, index, flags, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(pgoff_t, index) +- __field(unsigned long, ret) ++ __field(vm_flags_t, flags) ++ __field(vm_fault_t, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->index = index; ++ __entry->flags = flags; + __entry->ret = ret; + ), + +- TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx", ++ TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s", + show_dev_ino(__entry), + (unsigned long)__entry->index, +- __entry->ret) ++ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE), ++ __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE)) ++); ++ ++DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault, ++ ++ TP_PROTO(struct inode *inode, pgoff_t index, ++ vm_flags_t flags, vm_fault_t ret), ++ ++ TP_ARGS(inode, index, flags, ret) ++); ++ ++DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite, ++ ++ TP_PROTO(struct inode *inode, pgoff_t index, ++ vm_flags_t flags, vm_fault_t ret), ++ ++ TP_ARGS(inode, index, flags, ret) + ); + + TRACE_EVENT(f2fs_writepages, +diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h +index d5aa832f8dba3c..e9db9682316a2a 100644 +--- a/include/ufs/ufshcd.h ++++ b/include/ufs/ufshcd.h +@@ -430,6 +430,7 @@ struct ufs_clk_gating { + * @workq: workqueue to schedule devfreq suspend/resume work + * @suspend_work: worker to suspend devfreq + * @resume_work: worker to resume devfreq ++ * @target_freq: frequency requested by devfreq framework + * @min_gear: lowest HS gear to scale down to + * @is_enabled: tracks if scaling is currently enabled or not, controlled by + * clkscale_enable sysfs node +@@ -449,6 +450,7 @@ struct ufs_clk_scaling { + struct workqueue_struct *workq; + struct work_struct suspend_work; + struct work_struct resume_work; ++ unsigned long target_freq; + u32 min_gear; + bool is_enabled; + bool is_allowed; +@@ -862,6 +864,7 @@ enum ufshcd_mcq_opr { + * @auto_bkops_enabled: to track whether bkops is enabled in device + * @vreg_info: UFS device voltage regulator information + * @clk_list_head: UFS host controller clocks list node head ++ * @use_pm_opp: Indicates whether OPP based scaling is used or not + * @req_abort_count: number of times ufshcd_abort() has been called + * @lanes_per_direction: number of lanes per data direction between the UFS + * controller and the UFS device. 
+@@ -1014,6 +1017,7 @@ struct ufs_hba { + bool auto_bkops_enabled; + struct ufs_vreg_info vreg_info; + struct list_head clk_list_head; ++ bool use_pm_opp; + + /* Number of requests aborts */ + int req_abort_count; +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 1fb3b7a0ed5d27..536acebf22b0d0 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -2699,6 +2699,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) + /* Misaligned rcu_head! */ + WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); + ++ /* Avoid NULL dereference if callback is NULL. */ ++ if (WARN_ON_ONCE(!func)) ++ return; ++ + if (debug_rcu_head_queue(head)) { + /* + * Probable double call_rcu(), so leak the callback. +diff --git a/lib/test_objagg.c b/lib/test_objagg.c +index c0c957c5063541..c0f7bb53db8d5c 100644 +--- a/lib/test_objagg.c ++++ b/lib/test_objagg.c +@@ -899,8 +899,10 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints, + int err; + + stats = objagg_hints_stats_get(objagg_hints); +- if (IS_ERR(stats)) ++ if (IS_ERR(stats)) { ++ *errmsg = "objagg_hints_stats_get() failed."; + return PTR_ERR(stats); ++ } + err = __check_expect_stats(stats, expect_stats, errmsg); + objagg_stats_put(stats); + return err; +diff --git a/mm/secretmem.c b/mm/secretmem.c +index 399552814fd0ff..4bedf491a8a742 100644 +--- a/mm/secretmem.c ++++ b/mm/secretmem.c +@@ -195,19 +195,10 @@ static struct file *secretmem_file_create(unsigned long flags) + struct file *file; + struct inode *inode; + const char *anon_name = "[secretmem]"; +- const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name)); +- int err; + +- inode = alloc_anon_inode(secretmem_mnt->mnt_sb); ++ inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL); + if (IS_ERR(inode)) + return ERR_CAST(inode); +- +- err = security_inode_init_security_anon(inode, &qname, NULL); +- if (err) { +- file = ERR_PTR(err); +- goto err_free_inode; +- } +- + file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem", + O_RDWR, &secretmem_fops); + if (IS_ERR(file)) +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index 32f7bd0e891689..824208a53c251e 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -65,7 +65,7 @@ static DEFINE_IDA(hci_index_ida); + + /* Get HCI device by index. + * Device is held on return. 
*/ +-struct hci_dev *hci_dev_get(int index) ++static struct hci_dev *__hci_dev_get(int index, int *srcu_index) + { + struct hci_dev *hdev = NULL, *d; + +@@ -78,6 +78,8 @@ struct hci_dev *hci_dev_get(int index) + list_for_each_entry(d, &hci_dev_list, list) { + if (d->id == index) { + hdev = hci_dev_hold(d); ++ if (srcu_index) ++ *srcu_index = srcu_read_lock(&d->srcu); + break; + } + } +@@ -85,6 +87,22 @@ struct hci_dev *hci_dev_get(int index) + return hdev; + } + ++struct hci_dev *hci_dev_get(int index) ++{ ++ return __hci_dev_get(index, NULL); ++} ++ ++static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index) ++{ ++ return __hci_dev_get(index, srcu_index); ++} ++ ++static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index) ++{ ++ srcu_read_unlock(&hdev->srcu, srcu_index); ++ hci_dev_put(hdev); ++} ++ + /* ---- Inquiry support ---- */ + + bool hci_discovery_active(struct hci_dev *hdev) +@@ -590,9 +608,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev) + int hci_dev_reset(__u16 dev) + { + struct hci_dev *hdev; +- int err; ++ int err, srcu_index; + +- hdev = hci_dev_get(dev); ++ hdev = hci_dev_get_srcu(dev, &srcu_index); + if (!hdev) + return -ENODEV; + +@@ -614,7 +632,7 @@ int hci_dev_reset(__u16 dev) + err = hci_dev_do_reset(hdev); + + done: +- hci_dev_put(hdev); ++ hci_dev_put_srcu(hdev, srcu_index); + return err; + } + +@@ -2424,6 +2442,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) + if (!hdev) + return NULL; + ++ if (init_srcu_struct(&hdev->srcu)) { ++ kfree(hdev); ++ return NULL; ++ } ++ + hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); + hdev->esco_type = (ESCO_HV1); + hdev->link_mode = (HCI_LM_ACCEPT); +@@ -2670,6 +2693,9 @@ void hci_unregister_dev(struct hci_dev *hdev) + list_del(&hdev->list); + write_unlock(&hci_dev_list_lock); + ++ synchronize_srcu(&hdev->srcu); ++ cleanup_srcu_struct(&hdev->srcu); ++ + cancel_work_sync(&hdev->rx_work); + cancel_work_sync(&hdev->cmd_work); + cancel_work_sync(&hdev->tx_work); +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index e92bc4ceb5adda..d602e9d8eff450 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -2010,13 +2010,10 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) + static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) + { + struct adv_info *adv, *n; +- int err = 0; + + if (ext_adv_capable(hdev)) + /* Remove all existing sets */ +- err = hci_clear_adv_sets_sync(hdev, sk); +- if (ext_adv_capable(hdev)) +- return err; ++ return hci_clear_adv_sets_sync(hdev, sk); + + /* This is safe as long as there is no command send while the lock is + * held. +@@ -2044,13 +2041,11 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) + static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, + struct sock *sk) + { +- int err = 0; ++ int err; + + /* If we use extended advertising, instance has to be removed first. */ + if (ext_adv_capable(hdev)) +- err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); +- if (ext_adv_capable(hdev)) +- return err; ++ return hci_remove_ext_adv_instance_sync(hdev, instance, sk); + + /* This is safe as long as there is no command send while the lock is + * held. +@@ -2149,16 +2144,13 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) + int hci_disable_advertising_sync(struct hci_dev *hdev) + { + u8 enable = 0x00; +- int err = 0; + + /* If controller is not advertising we are done. 
*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + + if (ext_adv_capable(hdev)) +- err = hci_disable_ext_adv_instance_sync(hdev, 0x00); +- if (ext_adv_capable(hdev)) +- return err; ++ return hci_disable_ext_adv_instance_sync(hdev, 0x00); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +@@ -2526,6 +2518,10 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev) + int err; + int old_state; + ++ /* If controller is not advertising we are done. */ ++ if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) ++ return 0; ++ + /* If already been paused there is nothing to do. */ + if (hdev->advertising_paused) + return 0; +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index 853d217cabc917..82fa8c28438f25 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -1074,7 +1074,8 @@ static int mesh_send_done_sync(struct hci_dev *hdev, void *data) + struct mgmt_mesh_tx *mesh_tx; + + hci_dev_clear_flag(hdev, HCI_MESH_SENDING); +- hci_disable_advertising_sync(hdev); ++ if (list_empty(&hdev->adv_instances)) ++ hci_disable_advertising_sync(hdev); + mesh_tx = mgmt_mesh_next(hdev, NULL); + + if (mesh_tx) +@@ -2140,6 +2141,9 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data) + else + hci_dev_clear_flag(hdev, HCI_MESH); + ++ hdev->le_scan_interval = __le16_to_cpu(cp->period); ++ hdev->le_scan_window = __le16_to_cpu(cp->window); ++ + len -= sizeof(*cp); + + /* If filters don't fit, forward all adv pkts */ +@@ -2154,6 +2158,7 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) + { + struct mgmt_cp_set_mesh *cp = data; + struct mgmt_pending_cmd *cmd; ++ __u16 period, window; + int err = 0; + + bt_dev_dbg(hdev, "sock %p", sk); +@@ -2167,6 +2172,23 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + ++ /* Keep allowed ranges in sync with set_scan_params() */ ++ period = __le16_to_cpu(cp->period); ++ ++ if (period < 0x0004 || period > 0x4000) ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, ++ MGMT_STATUS_INVALID_PARAMS); ++ ++ window = __le16_to_cpu(cp->window); ++ ++ if (window < 0x0004 || window > 0x4000) ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, ++ MGMT_STATUS_INVALID_PARAMS); ++ ++ if (window > period) ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, ++ MGMT_STATUS_INVALID_PARAMS); ++ + hci_dev_lock(hdev); + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len); +@@ -6529,6 +6551,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_NOT_SUPPORTED); + ++ /* Keep allowed ranges in sync with set_mesh() */ + interval = __le16_to_cpu(cp->interval); + + if (interval < 0x0004 || interval > 0x4000) +diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c +index 68952752b5990f..31c4f112345ea4 100644 +--- a/net/mac80211/chan.c ++++ b/net/mac80211/chan.c +@@ -89,11 +89,11 @@ ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local, + + lockdep_assert_held(&local->chanctx_mtx); + ++ if (WARN_ON(!compat)) ++ return NULL; ++ + list_for_each_entry(link, &ctx->reserved_links, + reserved_chanctx_list) { +- if (!compat) +- compat = &link->reserved_chandef; +- + compat = cfg80211_chandef_compatible(&link->reserved_chandef, + compat); + if (!compat) +diff --git a/net/mac80211/ieee80211_i.h 
b/net/mac80211/ieee80211_i.h +index 04c876d78d3bf0..44aad3394084bd 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -1186,6 +1186,15 @@ ieee80211_vif_get_shift(struct ieee80211_vif *vif) + return shift; + } + ++#define for_each_link_data(sdata, __link) \ ++ struct ieee80211_sub_if_data *__sdata = sdata; \ ++ for (int __link_id = 0; \ ++ __link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \ ++ if ((!(__sdata)->vif.valid_links || \ ++ (__sdata)->vif.valid_links & BIT(__link_id)) && \ ++ ((__link) = sdata_dereference((__sdata)->link[__link_id], \ ++ (__sdata)))) ++ + static inline int + ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems, + struct cfg80211_rnr_elems *rnr_elems, +diff --git a/net/mac80211/link.c b/net/mac80211/link.c +index 16cbaea93fc32d..af4d2b2e9a26f8 100644 +--- a/net/mac80211/link.c ++++ b/net/mac80211/link.c +@@ -28,8 +28,16 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, + if (link_id < 0) + link_id = 0; + +- rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); +- rcu_assign_pointer(sdata->link[link_id], link); ++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { ++ struct ieee80211_sub_if_data *ap_bss; ++ struct ieee80211_bss_conf *ap_bss_conf; ++ ++ ap_bss = container_of(sdata->bss, ++ struct ieee80211_sub_if_data, u.ap); ++ ap_bss_conf = sdata_dereference(ap_bss->vif.link_conf[link_id], ++ ap_bss); ++ memcpy(link_conf, ap_bss_conf, sizeof(*link_conf)); ++ } + + link->sdata = sdata; + link->link_id = link_id; +@@ -65,6 +73,9 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, + + ieee80211_link_debugfs_add(link); + } ++ ++ rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); ++ rcu_assign_pointer(sdata->link[link_id], link); + } + + void ieee80211_link_stop(struct ieee80211_link_data *link) +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 5eb233f619817b..58665b6ae6354b 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -4419,6 +4419,10 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) + if (!multicast && + !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) + return false; ++ /* reject invalid/our STA address */ ++ if (!is_valid_ether_addr(hdr->addr2) || ++ ether_addr_equal(sdata->dev->dev_addr, hdr->addr2)) ++ return false; + if (!rx->sta) { + int rate_idx; + if (status->encoding != RX_ENC_LEGACY) +diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c +index fee772b4637c88..a7054546f52dfa 100644 +--- a/net/rose/rose_route.c ++++ b/net/rose/rose_route.c +@@ -497,22 +497,15 @@ void rose_rt_device_down(struct net_device *dev) + t = rose_node; + rose_node = rose_node->next; + +- for (i = 0; i < t->count; i++) { ++ for (i = t->count - 1; i >= 0; i--) { + if (t->neighbour[i] != s) + continue; + + t->count--; + +- switch (i) { +- case 0: +- t->neighbour[0] = t->neighbour[1]; +- fallthrough; +- case 1: +- t->neighbour[1] = t->neighbour[2]; +- break; +- case 2: +- break; +- } ++ memmove(&t->neighbour[i], &t->neighbour[i + 1], ++ sizeof(t->neighbour[0]) * ++ (t->count - i)); + } + + if (t->count <= 0) +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index df89790c459ad6..282423106f15d9 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -779,15 +779,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev) + + void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) + { +- bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED; + const struct Qdisc_class_ops *cops; + unsigned long cl; + u32 parentid; + bool 
notify; + int drops; + +- if (n == 0 && len == 0) +- return; + drops = max_t(int, n, 0); + rcu_read_lock(); + while ((parentid = sch->parent)) { +@@ -796,17 +793,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) + + if (sch->flags & TCQ_F_NOPARENT) + break; +- /* Notify parent qdisc only if child qdisc becomes empty. +- * +- * If child was empty even before update then backlog +- * counter is screwed and we skip notification because +- * parent class is already passive. +- * +- * If the original child was offloaded then it is allowed +- * to be seem as empty, so the parent is notified anyway. +- */ +- notify = !sch->q.qlen && !WARN_ON_ONCE(!n && +- !qdisc_is_offloaded); ++ /* Notify parent qdisc only if child qdisc becomes empty. */ ++ notify = !sch->q.qlen; + /* TODO: perform the search on a per txq basis */ + sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid)); + if (sch == NULL) { +@@ -815,6 +803,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) + } + cops = sch->ops->cl_ops; + if (notify && cops->qlen_notify) { ++ /* Note that qlen_notify must be idempotent as it may get called ++ * multiple times. ++ */ + cl = cops->find(sch, parentid); + cops->qlen_notify(sch, cl); + } +diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c +index b370070194fa4a..7eccd6708d6649 100644 +--- a/net/vmw_vsock/vmci_transport.c ++++ b/net/vmw_vsock/vmci_transport.c +@@ -119,6 +119,8 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt, + u16 proto, + struct vmci_handle handle) + { ++ memset(pkt, 0, sizeof(*pkt)); ++ + /* We register the stream control handler as an any cid handle so we + * must always send from a source address of VMADDR_CID_ANY + */ +@@ -131,8 +133,6 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt, + pkt->type = type; + pkt->src_port = src->svm_port; + pkt->dst_port = dst->svm_port; +- memset(&pkt->proto, 0, sizeof(pkt->proto)); +- memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2)); + + switch (pkt->type) { + case VMCI_TRANSPORT_PACKET_TYPE_INVALID: +diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c +index a9b87e159b2d11..1497a7822eee68 100644 +--- a/sound/isa/sb/sb16_main.c ++++ b/sound/isa/sb/sb16_main.c +@@ -703,6 +703,9 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct + unsigned char nval, oval; + int change; + ++ if (chip->mode & (SB_MODE_PLAYBACK | SB_MODE_CAPTURE)) ++ return -EBUSY; ++ + nval = ucontrol->value.enumerated.item[0]; + if (nval > 2) + return -EINVAL; +@@ -711,6 +714,10 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct + change = nval != oval; + snd_sb16_set_dma_mode(chip, nval); + spin_unlock_irqrestore(&chip->reg_lock, flags); ++ if (change) { ++ snd_dma_disable(chip->dma8); ++ snd_dma_disable(chip->dma16); ++ } + return change; + } + +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index 40e2b5a87916a8..429e61d47ffbbe 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -451,6 +451,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VF"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +@@ -514,6 +521,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + 
DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb2xxx"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { diff --git a/patch/kernel/archive/spacemit-6.6/patch-6.6.97-98.patch b/patch/kernel/archive/spacemit-6.6/patch-6.6.97-98.patch new file mode 100644 index 000000000..b76fa95ca --- /dev/null +++ b/patch/kernel/archive/spacemit-6.6/patch-6.6.97-98.patch @@ -0,0 +1,25 @@ +diff --git a/Makefile b/Makefile +index 9d5c08363637bd..0bb5c23c640616 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 97 ++SUBLEVEL = 98 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 1180689a239037..f6690df70b43ea 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -547,6 +547,7 @@ static bool amd_check_tsa_microcode(void) + + p.ext_fam = c->x86 - 0xf; + p.model = c->x86_model; ++ p.ext_model = c->x86_model >> 4; + p.stepping = c->x86_stepping; + + if (cpu_has(c, X86_FEATURE_ZEN3) || diff --git a/patch/kernel/archive/spacemit-6.6/patch-6.6.98-99.patch b/patch/kernel/archive/spacemit-6.6/patch-6.6.98-99.patch new file mode 100644 index 000000000..d74b0abd2 --- /dev/null +++ b/patch/kernel/archive/spacemit-6.6/patch-6.6.98-99.patch @@ -0,0 +1,6062 @@ +diff --git a/Documentation/bpf/map_hash.rst b/Documentation/bpf/map_hash.rst +index d2343952f2cbd3..8606bf958a8cf0 100644 +--- a/Documentation/bpf/map_hash.rst ++++ b/Documentation/bpf/map_hash.rst +@@ -233,10 +233,16 @@ attempts in order to enforce the LRU property which have increasing impacts on + other CPUs involved in the following operation attempts: + + - Attempt to use CPU-local state to batch operations +-- Attempt to fetch free nodes from global lists ++- Attempt to fetch ``target_free`` free nodes from global lists + - Attempt to pull any node from a global list and remove it from the hashmap + - Attempt to pull any node from any CPU's list and remove it from the hashmap + ++The number of nodes to borrow from the global list in a batch, ``target_free``, ++depends on the size of the map. Larger batch size reduces lock contention, but ++may also exhaust the global structure. The value is computed at map init to ++avoid exhaustion, by limiting aggregate reservation by all CPUs to half the map ++size. With a minimum of a single element and maximum budget of 128 at a time. ++ + This algorithm is described visually in the following diagram. 
See the + description in commit 3a08c2fd7634 ("bpf: LRU List") for a full explanation of + the corresponding operations: +diff --git a/Documentation/bpf/map_lru_hash_update.dot b/Documentation/bpf/map_lru_hash_update.dot +index a0fee349d29c27..ab10058f5b79f5 100644 +--- a/Documentation/bpf/map_lru_hash_update.dot ++++ b/Documentation/bpf/map_lru_hash_update.dot +@@ -35,18 +35,18 @@ digraph { + fn_bpf_lru_list_pop_free_to_local [shape=rectangle,fillcolor=2, + label="Flush local pending, + Rotate Global list, move +- LOCAL_FREE_TARGET ++ target_free + from global -> local"] + // Also corresponds to: + // fn__local_list_flush() + // fn_bpf_lru_list_rotate() + fn___bpf_lru_node_move_to_free[shape=diamond,fillcolor=2, +- label="Able to free\nLOCAL_FREE_TARGET\nnodes?"] ++ label="Able to free\ntarget_free\nnodes?"] + + fn___bpf_lru_list_shrink_inactive [shape=rectangle,fillcolor=3, + label="Shrink inactive list + up to remaining +- LOCAL_FREE_TARGET ++ target_free + (global LRU -> local)"] + fn___bpf_lru_list_shrink [shape=diamond,fillcolor=2, + label="> 0 entries in\nlocal free list?"] +diff --git a/Makefile b/Makefile +index 0bb5c23c640616..2aede51d98ea36 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 98 ++SUBLEVEL = 99 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c +index 2baa8d4a33ed3d..1a068859a41850 100644 +--- a/arch/um/drivers/vector_kern.c ++++ b/arch/um/drivers/vector_kern.c +@@ -1600,35 +1600,19 @@ static void vector_eth_configure( + + device->dev = dev; + +- *vp = ((struct vector_private) +- { +- .list = LIST_HEAD_INIT(vp->list), +- .dev = dev, +- .unit = n, +- .options = get_transport_options(def), +- .rx_irq = 0, +- .tx_irq = 0, +- .parsed = def, +- .max_packet = get_mtu(def) + ETH_HEADER_OTHER, +- /* TODO - we need to calculate headroom so that ip header +- * is 16 byte aligned all the time +- */ +- .headroom = get_headroom(def), +- .form_header = NULL, +- .verify_header = NULL, +- .header_rxbuffer = NULL, +- .header_txbuffer = NULL, +- .header_size = 0, +- .rx_header_size = 0, +- .rexmit_scheduled = false, +- .opened = false, +- .transport_data = NULL, +- .in_write_poll = false, +- .coalesce = 2, +- .req_size = get_req_size(def), +- .in_error = false, +- .bpf = NULL +- }); ++ INIT_LIST_HEAD(&vp->list); ++ vp->dev = dev; ++ vp->unit = n; ++ vp->options = get_transport_options(def); ++ vp->parsed = def; ++ vp->max_packet = get_mtu(def) + ETH_HEADER_OTHER; ++ /* ++ * TODO - we need to calculate headroom so that ip header ++ * is 16 byte aligned all the time ++ */ ++ vp->headroom = get_headroom(def); ++ vp->coalesce = 2; ++ vp->req_size = get_req_size(def); + + dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST); + INIT_WORK(&vp->reset_tx, vector_reset_tx); +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index caa6adcedc18dd..2b5b7d9a24e98c 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -128,7 +128,7 @@ config X86 + select ARCH_WANTS_DYNAMIC_TASK_STRUCT + select ARCH_WANTS_NO_INSTR + select ARCH_WANT_GENERAL_HUGETLB +- select ARCH_WANT_HUGE_PMD_SHARE ++ select ARCH_WANT_HUGE_PMD_SHARE if X86_64 + select ARCH_WANT_LD_ORPHAN_WARN + select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64 + select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 9fbad4cb971bff..03385545758159 100644 +--- 
a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -575,6 +575,7 @@ + #define MSR_AMD64_OSVW_STATUS 0xc0010141 + #define MSR_AMD_PPIN_CTL 0xc00102f0 + #define MSR_AMD_PPIN 0xc00102f1 ++#define MSR_AMD64_CPUID_FN_7 0xc0011002 + #define MSR_AMD64_CPUID_FN_1 0xc0011004 + #define MSR_AMD64_LS_CFG 0xc0011020 + #define MSR_AMD64_DC_CFG 0xc0011022 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index f6690df70b43ea..5fcdfbb792bd9f 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -1154,6 +1154,13 @@ static void init_amd_zen2(struct cpuinfo_x86 *c) + { + fix_erratum_1386(c); + zen2_zenbleed_check(c); ++ ++ /* Disable RDSEED on AMD Cyan Skillfish because of an error. */ ++ if (c->x86_model == 0x47 && c->x86_stepping == 0x0) { ++ clear_cpu_cap(c, X86_FEATURE_RDSEED); ++ msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18); ++ pr_emerg("RDSEED is not reliable on this platform; disabling.\n"); ++ } + } + + static void init_amd_zen3(struct cpuinfo_x86 *c) +diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c +index c267f43de39eab..b89c5a38540341 100644 +--- a/arch/x86/kernel/cpu/mce/amd.c ++++ b/arch/x86/kernel/cpu/mce/amd.c +@@ -335,7 +335,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu) + + struct thresh_restart { + struct threshold_block *b; +- int reset; + int set_lvt_off; + int lvt_off; + u16 old_limit; +@@ -430,13 +429,13 @@ static void threshold_restart_bank(void *_tr) + + rdmsr(tr->b->address, lo, hi); + +- if (tr->b->threshold_limit < (hi & THRESHOLD_MAX)) +- tr->reset = 1; /* limit cannot be lower than err count */ +- +- if (tr->reset) { /* reset err count and overflow bit */ +- hi = +- (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | +- (THRESHOLD_MAX - tr->b->threshold_limit); ++ /* ++ * Reset error count and overflow bit. ++ * This is done during init or after handling an interrupt. 
++ */
++ if (hi & MASK_OVERFLOW_HI || tr->set_lvt_off) {
++ hi &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI);
++ hi |= THRESHOLD_MAX - tr->b->threshold_limit;
+ } else if (tr->old_limit) { /* change limit w/o reset */
+ int new_count = (hi & THRESHOLD_MAX) +
+ (tr->old_limit - tr->b->threshold_limit);
+@@ -1049,13 +1048,20 @@ static const char *get_name(unsigned int cpu, unsigned int bank, struct threshol
+ }
+
+ bank_type = smca_get_bank_type(cpu, bank);
+- if (bank_type >= N_SMCA_BANK_TYPES)
+- return NULL;
+
+ if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) {
+ if (b->block < ARRAY_SIZE(smca_umc_block_names))
+ return smca_umc_block_names[b->block];
+- return NULL;
++ }
++
++ if (b && b->block) {
++ snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_block_%u", b->block);
++ return buf_mcatype;
++ }
++
++ if (bank_type >= N_SMCA_BANK_TYPES) {
++ snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_bank_%u", bank);
++ return buf_mcatype;
+ }
+
+ if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index e103c227acd3ae..106436ec3c005e 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -2704,15 +2704,9 @@ static int mce_cpu_dead(unsigned int cpu)
+ static int mce_cpu_online(unsigned int cpu)
+ {
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
+- int ret;
+
+ mce_device_create(cpu);
+-
+- ret = mce_threshold_create_device(cpu);
+- if (ret) {
+- mce_device_remove(cpu);
+- return ret;
+- }
++ mce_threshold_create_device(cpu);
+ mce_reenable_cpu();
+ mce_start_timer(t);
+ return 0;
+diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
+index f5323551c1a9a9..0a9ce4f8a88cf5 100644
+--- a/arch/x86/kernel/cpu/mce/intel.c
++++ b/arch/x86/kernel/cpu/mce/intel.c
+@@ -517,6 +517,7 @@ void mce_intel_feature_init(struct cpuinfo_x86 *c)
+ void mce_intel_feature_clear(struct cpuinfo_x86 *c)
+ {
+ intel_clear_lmce();
++ cmci_clear();
+ }
+
+ bool intel_filter_mce(struct mce *m)
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 99e72b8a96ac0b..67c01bd332f6f0 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1782,6 +1782,10 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
+ struct kvm_vcpu *src_vcpu;
+ unsigned long i;
+
++ if (src->created_vcpus != atomic_read(&src->online_vcpus) ||
++ dst->created_vcpus != atomic_read(&dst->online_vcpus))
++ return -EBUSY;
++
+ if (!sev_es_guest(src))
+ return 0;
+
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index 0ea6016ad132a2..c4a158758cb740 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -1737,8 +1737,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
+ {
+ struct kvm_vcpu *vcpu;
+
+- if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
+- return -EINVAL;
++ /*
++ * Don't check for the port being within range of max_evtchn_port().
++ * Userspace can configure whatever targets it likes; events just won't
++ * be delivered if/while the target is invalid, just like userspace can
++ * configure MSIs which target non-existent APICs.
++ *
++ * This allows that on Live Migration and Live Update, the IRQ routing table
++ * can be restored *independently* of other things like creating vCPUs,
++ * without imposing an ordering dependency on userspace. In this
++ * particular case, the problematic ordering would be with setting the
++ * Xen 'long mode' flag, which changes max_evtchn_port() to allow 4096
++ * instead of 1024 event channels. 
++ */ + + /* We only support 2 level event channels for now */ + if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) +diff --git a/crypto/ecc.c b/crypto/ecc.c +index 21504280aca2e5..27fcecf3a61fb1 100644 +--- a/crypto/ecc.c ++++ b/crypto/ecc.c +@@ -69,7 +69,7 @@ EXPORT_SYMBOL(ecc_get_curve); + void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, + u64 *out, unsigned int ndigits) + { +- int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64)); ++ int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64)); + unsigned int o = nbytes & 7; + __be64 msd = 0; + +diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c +index cd3cbb7a36f855..e3cbaf3c3bbc15 100644 +--- a/drivers/acpi/battery.c ++++ b/drivers/acpi/battery.c +@@ -243,23 +243,10 @@ static int acpi_battery_get_property(struct power_supply *psy, + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_POWER_NOW: +- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) { ++ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) + ret = -ENODEV; +- break; +- } +- +- val->intval = battery->rate_now * 1000; +- /* +- * When discharging, the current should be reported as a +- * negative number as per the power supply class interface +- * definition. +- */ +- if (psp == POWER_SUPPLY_PROP_CURRENT_NOW && +- (battery->state & ACPI_BATTERY_STATE_DISCHARGING) && +- acpi_battery_handle_discharging(battery) +- == POWER_SUPPLY_STATUS_DISCHARGING) +- val->intval = -val->intval; +- ++ else ++ val->intval = battery->rate_now * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: +diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c +index a876024d8a05f9..63d41320cd5cf0 100644 +--- a/drivers/atm/idt77252.c ++++ b/drivers/atm/idt77252.c +@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, + + IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data, + skb->len, DMA_TO_DEVICE); ++ if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb))) ++ return -ENOMEM; + + error = -EINVAL; + +@@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue, + paddr = dma_map_single(&card->pcidev->dev, skb->data, + skb_end_pointer(skb) - skb->data, + DMA_FROM_DEVICE); ++ if (dma_mapping_error(&card->pcidev->dev, paddr)) ++ goto outpoolrm; + IDT77252_PRV_PADDR(skb) = paddr; + + if (push_rx_skb(card, skb, queue)) { +@@ -1871,6 +1875,7 @@ add_rx_skb(struct idt77252_dev *card, int queue, + dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb), + skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE); + ++outpoolrm: + handle = IDT77252_PRV_POOL(skb); + card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL; + +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index 2203686156bfe0..3742ddf46c55ae 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -2120,9 +2120,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) + goto out; + } + } +- ret = nbd_start_device(nbd); +- if (ret) +- goto out; ++ + if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { + nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], + GFP_KERNEL); +@@ -2138,6 +2136,8 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) + goto out; + } + set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags); ++ ++ ret = nbd_start_device(nbd); + out: + mutex_unlock(&nbd->config_lock); + if (!ret) { +diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c +index 
df3e5aab4b5ac9..8c873a8e39cd93 100644 +--- a/drivers/block/ublk_drv.c ++++ b/drivers/block/ublk_drv.c +@@ -2323,7 +2323,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) + if (copy_from_user(&info, argp, sizeof(info))) + return -EFAULT; + +- if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || info.nr_hw_queues > UBLK_MAX_NR_QUEUES) ++ if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth || ++ info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues) + return -EINVAL; + + if (capable(CAP_SYS_ADMIN)) +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index 186f1fee753403..db8f1dadaa9f4f 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -1241,7 +1241,7 @@ int ipmi_create_user(unsigned int if_num, + } + /* Not found, return an error */ + rv = -EINVAL; +- goto out_kfree; ++ goto out_unlock; + + found: + if (atomic_add_return(1, &intf->nr_users) > max_users) { +@@ -1283,6 +1283,7 @@ int ipmi_create_user(unsigned int if_num, + + out_kfree: + atomic_dec(&intf->nr_users); ++out_unlock: + srcu_read_unlock(&ipmi_interfaces_srcu, index); + vfree(new_user); + return rv; +diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c +index 1955eaeba0ab7c..4ef05c8c75e652 100644 +--- a/drivers/gpu/drm/drm_framebuffer.c ++++ b/drivers/gpu/drm/drm_framebuffer.c +@@ -844,11 +844,23 @@ void drm_framebuffer_free(struct kref *kref) + int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_framebuffer_funcs *funcs) + { ++ unsigned int i; + int ret; ++ bool exists; + + if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) + return -EINVAL; + ++ for (i = 0; i < fb->format->num_planes; i++) { ++ if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))) ++ fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); ++ if (fb->obj[i]) { ++ exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]); ++ if (exists) ++ fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); ++ } ++ } ++ + INIT_LIST_HEAD(&fb->filp_head); + + fb->funcs = funcs; +@@ -857,7 +869,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, + false, drm_framebuffer_free); + if (ret) +- goto out; ++ goto err; + + mutex_lock(&dev->mode_config.fb_lock); + dev->mode_config.num_fb++; +@@ -865,7 +877,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + mutex_unlock(&dev->mode_config.fb_lock); + + drm_mode_object_register(dev, &fb->base); +-out: ++ ++ return 0; ++ ++err: ++ for (i = 0; i < fb->format->num_planes; i++) { ++ if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) { ++ drm_gem_object_handle_put_unlocked(fb->obj[i]); ++ fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); ++ } ++ } + return ret; + } + EXPORT_SYMBOL(drm_framebuffer_init); +@@ -942,6 +963,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); + void drm_framebuffer_cleanup(struct drm_framebuffer *fb) + { + struct drm_device *dev = fb->dev; ++ unsigned int i; ++ ++ for (i = 0; i < fb->format->num_planes; i++) { ++ if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) ++ drm_gem_object_handle_put_unlocked(fb->obj[i]); ++ } + + mutex_lock(&dev->mode_config.fb_lock); + list_del(&fb->head); +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index deb93f78ce3442..a3370c77e949dc 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ 
b/drivers/gpu/drm/drm_gem.c +@@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj) + } + EXPORT_SYMBOL(drm_gem_private_object_fini); + ++static void drm_gem_object_handle_get(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ ++ drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock)); ++ ++ if (obj->handle_count++ == 0) ++ drm_gem_object_get(obj); ++} ++ ++/** ++ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any ++ * @obj: GEM object ++ * ++ * Acquires a reference on the GEM buffer object's handle. Required to keep ++ * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked() ++ * to release the reference. Does nothing if the buffer object has no handle. ++ * ++ * Returns: ++ * True if a handle exists, or false otherwise ++ */ ++bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ ++ guard(mutex)(&dev->object_name_lock); ++ ++ /* ++ * First ref taken during GEM object creation, if any. Some ++ * drivers set up internal framebuffers with GEM objects that ++ * do not have a GEM handle. Hence, this counter can be zero. ++ */ ++ if (!obj->handle_count) ++ return false; ++ ++ drm_gem_object_handle_get(obj); ++ ++ return true; ++} ++ + /** + * drm_gem_object_handle_free - release resources bound to userspace handles + * @obj: GEM object to clean up. +@@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) + } + } + +-static void +-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) ++/** ++ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle ++ * @obj: GEM object ++ * ++ * Releases a reference on the GEM buffer object's handle. Possibly releases ++ * the GEM buffer object and associated dma-buf objects. ++ */ ++void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) + { + struct drm_device *dev = obj->dev; + bool final = false; + +- if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) ++ if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0)) + return; + + /* +- * Must bump handle count first as this may be the last +- * ref, in which case the object would disappear before we +- * checked for a name +- */ ++ * Must bump handle count first as this may be the last ++ * ref, in which case the object would disappear before ++ * we checked for a name. ++ */ + + mutex_lock(&dev->object_name_lock); + if (--obj->handle_count == 0) { +@@ -253,6 +299,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) + struct drm_file *file_priv = data; + struct drm_gem_object *obj = ptr; + ++ if (drm_WARN_ON(obj->dev, !data)) ++ return 0; ++ + if (obj->funcs->close) + obj->funcs->close(obj, file_priv); + +@@ -363,8 +412,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, + int ret; + + WARN_ON(!mutex_is_locked(&dev->object_name_lock)); +- if (obj->handle_count++ == 0) +- drm_gem_object_get(obj); ++ ++ drm_gem_object_handle_get(obj); + + /* + * Get the user-visible handle using idr. 
Preload and perform +@@ -373,7 +422,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, + idr_preload(GFP_KERNEL); + spin_lock(&file_priv->table_lock); + +- ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); ++ ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT); + + spin_unlock(&file_priv->table_lock); + idr_preload_end(); +@@ -394,6 +443,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, + goto err_revoke; + } + ++ /* mirrors drm_gem_handle_delete to avoid races */ ++ spin_lock(&file_priv->table_lock); ++ obj = idr_replace(&file_priv->object_idr, obj, handle); ++ WARN_ON(obj != NULL); ++ spin_unlock(&file_priv->table_lock); + *handlep = handle; + return 0; + +diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h +index 0ef5fc2a61f194..8d433fe37e8fa8 100644 +--- a/drivers/gpu/drm/drm_internal.h ++++ b/drivers/gpu/drm/drm_internal.h +@@ -155,6 +155,8 @@ void drm_sysfs_lease_event(struct drm_device *dev); + + /* drm_gem.c */ + int drm_gem_init(struct drm_device *dev); ++bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj); ++void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj); + int drm_gem_handle_create_tail(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep); +diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c +index 0156a5e9443594..5f8e5e87d7cd63 100644 +--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c ++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c +@@ -601,6 +601,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) + if (!ctx->drm_dev) + goto out; + ++ /* check if crtc and vblank have been initialized properly */ ++ if (!drm_dev_has_vblank(ctx->drm_dev)) ++ goto out; ++ + if (!ctx->i80_if) { + drm_crtc_handle_vblank(&ctx->crtc->base); + +diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c +index 4860790666af51..14ef61b44f47cd 100644 +--- a/drivers/gpu/drm/tegra/nvdec.c ++++ b/drivers/gpu/drm/tegra/nvdec.c +@@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec) + + if (!client->group) { + virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); +- +- err = dma_mapping_error(nvdec->dev, iova); +- if (err < 0) +- return err; ++ if (!virt) ++ return -ENOMEM; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + if (IS_ERR(virt)) +diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c +index 0b3f4267130c45..64606104551cd8 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo_util.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c +@@ -254,6 +254,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, + ret = dma_resv_trylock(&fbo->base.base._resv); + WARN_ON(!ret); + ++ ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); ++ if (ret) { ++ dma_resv_unlock(&fbo->base.base._resv); ++ kfree(fbo); ++ return ret; ++ } ++ + if (fbo->base.resource) { + ttm_resource_set_bo(fbo->base.resource, &fbo->base); + bo->resource = NULL; +@@ -262,12 +269,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, + fbo->base.bulk_move = NULL; + } + +- ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); +- if (ret) { +- kfree(fbo); +- return ret; +- } +- + ttm_bo_get(bo); + fbo->bo = bo; + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index a8665d57094b22..0d1d7162814f32 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -305,6 +305,8 @@ + #define USB_DEVICE_ID_ASUS_AK1D 0x1125 + 
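/*
 * [Editor's aside on the DMA hunks above (idt77252 and tegra/nvdec): a
 * minimal self-contained sketch, not kernel code; demo_dma() is a made-up
 * name. The two DMA APIs report failure differently, which is exactly what
 * those fixes check for: dma_map_single() returns a handle that must be
 * tested with dma_mapping_error(), while dma_alloc_coherent() signals
 * failure with a NULL virtual address, never a poisoned handle.]
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int demo_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;
	void *virt;

	/* Streaming mapping: check the returned handle, not for NULL. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);

	/* Coherent allocation: check the CPU pointer, not the handle. */
	virt = dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	dma_free_coherent(dev, len, virt, handle);

	return 0;
}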
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408 + #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421 ++#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA 0xb824 ++#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2 0xb82c + + #define USB_VENDOR_ID_CHUNGHWAT 0x2247 + #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 +@@ -807,6 +809,7 @@ + #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 + #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 + #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 ++#define USB_DEVICE_ID_LENOVO_X1_TAB2 0x60a4 + #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 + #define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe + #define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae +@@ -1501,4 +1504,7 @@ + #define USB_VENDOR_ID_SIGNOTEC 0x2133 + #define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018 + ++#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a ++#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155 ++ + #endif +diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c +index ee65da98c7d5b5..32cb2e75228c4b 100644 +--- a/drivers/hid/hid-lenovo.c ++++ b/drivers/hid/hid-lenovo.c +@@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev, + return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field, + usage, bit, max); + case USB_DEVICE_ID_LENOVO_X1_TAB: ++ case USB_DEVICE_ID_LENOVO_X1_TAB2: + case USB_DEVICE_ID_LENOVO_X1_TAB3: + return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max); + default: +@@ -587,6 +588,7 @@ static ssize_t attr_fn_lock_store(struct device *dev, + break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + case USB_DEVICE_ID_LENOVO_X1_TAB: ++ case USB_DEVICE_ID_LENOVO_X1_TAB2: + case USB_DEVICE_ID_LENOVO_X1_TAB3: + ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value); + if (ret) +@@ -782,6 +784,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field, + return lenovo_event_cptkbd(hdev, field, usage, value); + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + case USB_DEVICE_ID_LENOVO_X1_TAB: ++ case USB_DEVICE_ID_LENOVO_X1_TAB2: + case USB_DEVICE_ID_LENOVO_X1_TAB3: + return lenovo_event_tp10ubkbd(hdev, field, usage, value); + default: +@@ -1065,6 +1068,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, + break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + case USB_DEVICE_ID_LENOVO_X1_TAB: ++ case USB_DEVICE_ID_LENOVO_X1_TAB2: + case USB_DEVICE_ID_LENOVO_X1_TAB3: + ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value); + break; +@@ -1296,6 +1300,7 @@ static int lenovo_probe(struct hid_device *hdev, + break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + case USB_DEVICE_ID_LENOVO_X1_TAB: ++ case USB_DEVICE_ID_LENOVO_X1_TAB2: + case USB_DEVICE_ID_LENOVO_X1_TAB3: + ret = lenovo_probe_tp10ubkbd(hdev); + break; +@@ -1383,6 +1388,7 @@ static void lenovo_remove(struct hid_device *hdev) + break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + case USB_DEVICE_ID_LENOVO_X1_TAB: ++ case USB_DEVICE_ID_LENOVO_X1_TAB2: + case USB_DEVICE_ID_LENOVO_X1_TAB3: + lenovo_remove_tp10ubkbd(hdev); + break; +@@ -1433,6 +1439,8 @@ static const struct hid_device_id lenovo_devices[] = { + */ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) }, ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, ++ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB2) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) }, + { } +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 6386043aab0bbf..becd4c1ccf93c1 100644 +--- a/drivers/hid/hid-multitouch.c ++++ 
b/drivers/hid/hid-multitouch.c +@@ -2110,12 +2110,18 @@ static const struct hid_device_id mt_devices[] = { + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, + +- /* Lenovo X1 TAB Gen 2 */ ++ /* Lenovo X1 TAB Gen 1 */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LENOVO, + USB_DEVICE_ID_LENOVO_X1_TAB) }, + ++ /* Lenovo X1 TAB Gen 2 */ ++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, ++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, ++ USB_VENDOR_ID_LENOVO, ++ USB_DEVICE_ID_LENOVO_X1_TAB2) }, ++ + /* Lenovo X1 TAB Gen 3 */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index 73979643315bfd..80372342c176af 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -747,6 +747,8 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) }, +@@ -894,6 +896,7 @@ static const struct hid_device_id hid_ignore_list[] = { + #endif + { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) }, + { } + }; + +diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c +index aad2d75c036781..14ead0fbb80fd4 100644 +--- a/drivers/input/keyboard/atkbd.c ++++ b/drivers/input/keyboard/atkbd.c +@@ -826,7 +826,7 @@ static int atkbd_probe(struct atkbd *atkbd) + + if (atkbd_skip_getid(atkbd)) { + atkbd->id = 0xab83; +- return 0; ++ goto deactivate_kbd; + } + + /* +@@ -863,6 +863,7 @@ static int atkbd_probe(struct atkbd *atkbd) + return -1; + } + ++deactivate_kbd: + /* + * Make sure nothing is coming from the keyboard and disturbs our + * internal state. 
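+ *
+ * [Editor's note on the atkbd hunk above: the fix turns the early
+ * "return 0" in the skip-GETID path into "goto deactivate_kbd", so a
+ * keyboard whose ID probe is skipped is still quiesced here instead of
+ * bypassing this step. Illustrative shape of the shared-teardown flow:
+ *
+ *	if (atkbd_skip_getid(atkbd)) {
+ *		atkbd->id = 0xab83;
+ *		goto deactivate_kbd;
+ *	}
+ *	...
+ * deactivate_kbd:
+ *	atkbd_deactivate(atkbd);
+ * ]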
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c +index 21decb97bc050b..1f1991634d0a21 100644 +--- a/drivers/md/md-bitmap.c ++++ b/drivers/md/md-bitmap.c +@@ -2119,8 +2119,7 @@ int md_bitmap_get_stats(struct bitmap *bitmap, struct md_bitmap_stats *stats) + + if (!bitmap) + return -ENOENT; +- if (!bitmap->mddev->bitmap_info.external && +- !bitmap->storage.sb_page) ++ if (!bitmap->storage.sb_page) + return -EINVAL; + sb = kmap_local_page(bitmap->storage.sb_page); + stats->sync_size = le64_to_cpu(sb->sync_size); +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 8b25287c89ed6d..4c1f86ca55208c 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -3297,6 +3297,7 @@ static int raid1_reshape(struct mddev *mddev) + /* ok, everything is stopped */ + oldpool = conf->r1bio_pool; + conf->r1bio_pool = newpool; ++ init_waitqueue_head(&conf->r1bio_pool.wait); + + for (d = d2 = 0; d < conf->raid_disks; d++) { + struct md_rdev *rdev = conf->mirrors[d].rdev; +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 36b6bf3f8b29fd..a75d090a7fa15a 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -1205,8 +1205,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, + rcu_read_unlock(); + } + +- if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) ++ if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) { ++ raid_end_bio_io(r10_bio); + return; ++ } ++ + rdev = read_balance(conf, r10_bio, &max_sectors); + if (!rdev) { + if (err_rdev) { +@@ -1428,8 +1431,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, + } + + sectors = r10_bio->sectors; +- if (!regular_request_wait(mddev, conf, bio, sectors)) ++ if (!regular_request_wait(mddev, conf, bio, sectors)) { ++ raid_end_bio_io(r10_bio); + return; ++ } ++ + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + (mddev->reshape_backwards + ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index ba7f7de25c8529..e6a74d66f0d8ca 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -606,7 +606,7 @@ static int m_can_handle_lost_msg(struct net_device *dev) + struct can_frame *frame; + u32 timestamp = 0; + +- netdev_err(dev, "msg lost in rxf0\n"); ++ netdev_dbg(dev, "msg lost in rxf0\n"); + + stats->rx_errors++; + stats->rx_over_errors++; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +index 63e0670383852a..1727e9bb1479d5 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +@@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) + + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc) + return -EINVAL; ++ } + ++ for (i = 0; i < max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +index 758f51366ef032..07a458ecb7cc93 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +@@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, + tx_buf->action = XDP_REDIRECT; + tx_buf->xdpf = xdpf; + dma_unmap_addr_set(tx_buf, mapping, mapping); +- dma_unmap_len_set(tx_buf, len, 0); ++ dma_unmap_len_set(tx_buf, len, len); + } + + void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h +index b3fc18db4f4c39..dfbda2e5ec88aa 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.h ++++ b/drivers/net/ethernet/ibm/ibmvnic.h +@@ -211,7 +211,6 @@ struct ibmvnic_statistics { + u8 reserved[72]; + } __packed __aligned(8); + +-#define NUM_TX_STATS 3 + struct ibmvnic_tx_queue_stats { + u64 batched_packets; + u64 direct_packets; +@@ -219,13 +218,18 @@ struct ibmvnic_tx_queue_stats { + u64 dropped_packets; + }; + +-#define NUM_RX_STATS 3 ++#define NUM_TX_STATS \ ++ (sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64)) ++ + struct ibmvnic_rx_queue_stats { + u64 packets; + u64 bytes; + u64 interrupts; + }; + ++#define NUM_RX_STATS \ ++ (sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64)) ++ + struct ibmvnic_acl_buffer { + __be32 len; + __be32 version; +diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c +index 9ed965d61e3554..d3c9a3020fbf6e 100644 +--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c ++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c +@@ -28,6 +28,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev) + gc->db_page_base = gc->bar0_va + + mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF); + ++ gc->phys_db_page_base = gc->bar0_pa + ++ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF); ++ + sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF); + + sriov_base_va = gc->bar0_va + sriov_base_off; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +index dd2ab6185c40e8..05ea74e9379399 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +@@ -364,19 +364,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, + } + + /* TX/RX NORMAL interrupts */ +- if (likely(intr_status & 
XGMAC_NIS)) { +- if (likely(intr_status & XGMAC_RI)) { +- u64_stats_update_begin(&stats->syncp); +- u64_stats_inc(&stats->rx_normal_irq_n[chan]); +- u64_stats_update_end(&stats->syncp); +- ret |= handle_rx; +- } +- if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { +- u64_stats_update_begin(&stats->syncp); +- u64_stats_inc(&stats->tx_normal_irq_n[chan]); +- u64_stats_update_end(&stats->syncp); +- ret |= handle_tx; +- } ++ if (likely(intr_status & XGMAC_RI)) { ++ u64_stats_update_begin(&stats->syncp); ++ u64_stats_inc(&stats->rx_normal_irq_n[chan]); ++ u64_stats_update_end(&stats->syncp); ++ ret |= handle_rx; ++ } ++ if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { ++ u64_stats_update_begin(&stats->syncp); ++ u64_stats_inc(&stats->tx_normal_irq_n[chan]); ++ u64_stats_update_end(&stats->syncp); ++ ret |= handle_tx; + } + + /* Clear interrupts */ +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index c10f94d69dad3b..3d622634e82aad 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev, + if (ering->rx_pending > RX_BD_NUM_MAX || + ering->rx_mini_pending || + ering->rx_jumbo_pending || +- ering->rx_pending > TX_BD_NUM_MAX) ++ ering->tx_pending > TX_BD_NUM_MAX) + return -EINVAL; + + if (netif_running(ndev)) +diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c +index 623607fd2cefd3..cb8306cd98260c 100644 +--- a/drivers/net/phy/microchip.c ++++ b/drivers/net/phy/microchip.c +@@ -310,7 +310,7 @@ static void lan88xx_link_change_notify(struct phy_device *phydev) + * As workaround, set to 10 before setting to 100 + * at forced 100 F/H mode. + */ +- if (!phydev->autoneg && phydev->speed == 100) { ++ if (phydev->state == PHY_NOLINK && !phydev->autoneg && phydev->speed == 100) { + /* disable phy interrupt */ + temp = phy_read(phydev, LAN88XX_INT_MASK); + temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; +diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c +index c88edb19d2e710..4ca813c009476a 100644 +--- a/drivers/net/phy/smsc.c ++++ b/drivers/net/phy/smsc.c +@@ -155,10 +155,29 @@ static int smsc_phy_reset(struct phy_device *phydev) + + static int lan87xx_config_aneg(struct phy_device *phydev) + { +- int rc; ++ u8 mdix_ctrl; + int val; ++ int rc; ++ ++ /* When auto-negotiation is disabled (forced mode), the PHY's ++ * Auto-MDIX will continue toggling the TX/RX pairs. ++ * ++ * To establish a stable link, we must select a fixed MDI mode. ++ * If the user has not specified a fixed MDI mode (i.e., mdix_ctrl is ++ * 'auto'), we default to ETH_TP_MDI. This choice of a ETH_TP_MDI mode ++ * mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN ++ * strap were configured for a fixed MDI connection. 
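++ *
++ * [Editor's summary of the selection logic that follows:
++ *
++ *	autoneg off, mdix_ctrl == ETH_TP_MDI_AUTO    -> force ETH_TP_MDI
++ *	autoneg off, fixed ETH_TP_MDI / ETH_TP_MDI_X -> honour user choice
++ *	autoneg on                                   -> pass mdix_ctrl through
++ * ]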
++ */ ++ if (phydev->autoneg == AUTONEG_DISABLE) { ++ if (phydev->mdix_ctrl == ETH_TP_MDI_AUTO) ++ mdix_ctrl = ETH_TP_MDI; ++ else ++ mdix_ctrl = phydev->mdix_ctrl; ++ } else { ++ mdix_ctrl = phydev->mdix_ctrl; ++ } + +- switch (phydev->mdix_ctrl) { ++ switch (mdix_ctrl) { + case ETH_TP_MDI: + val = SPECIAL_CTRL_STS_OVRRD_AMDIX_; + break; +@@ -167,7 +186,8 @@ static int lan87xx_config_aneg(struct phy_device *phydev) + SPECIAL_CTRL_STS_AMDIX_STATE_; + break; + case ETH_TP_MDI_AUTO: +- val = SPECIAL_CTRL_STS_AMDIX_ENABLE_; ++ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ | ++ SPECIAL_CTRL_STS_AMDIX_ENABLE_; + break; + default: + return genphy_config_aneg(phydev); +@@ -183,7 +203,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev) + rc |= val; + phy_write(phydev, SPECIAL_CTRL_STS, rc); + +- phydev->mdix = phydev->mdix_ctrl; ++ phydev->mdix = mdix_ctrl; + return genphy_config_aneg(phydev); + } + +@@ -261,6 +281,33 @@ int lan87xx_read_status(struct phy_device *phydev) + } + EXPORT_SYMBOL_GPL(lan87xx_read_status); + ++static int lan87xx_phy_config_init(struct phy_device *phydev) ++{ ++ int rc; ++ ++ /* The LAN87xx PHY's initial MDI-X mode is determined by the AUTOMDIX_EN ++ * hardware strap, but the driver cannot read the strap's status. This ++ * creates an unpredictable initial state. ++ * ++ * To ensure consistent and reliable behavior across all boards, ++ * override the strap configuration on initialization and force the PHY ++ * into a known state with Auto-MDIX enabled, which is the expected ++ * default for modern hardware. ++ */ ++ rc = phy_modify(phydev, SPECIAL_CTRL_STS, ++ SPECIAL_CTRL_STS_OVRRD_AMDIX_ | ++ SPECIAL_CTRL_STS_AMDIX_ENABLE_ | ++ SPECIAL_CTRL_STS_AMDIX_STATE_, ++ SPECIAL_CTRL_STS_OVRRD_AMDIX_ | ++ SPECIAL_CTRL_STS_AMDIX_ENABLE_); ++ if (rc < 0) ++ return rc; ++ ++ phydev->mdix_ctrl = ETH_TP_MDI_AUTO; ++ ++ return smsc_phy_config_init(phydev); ++} ++ + static int lan874x_phy_config_init(struct phy_device *phydev) + { + u16 val; +@@ -696,7 +743,7 @@ static struct phy_driver smsc_phy_driver[] = { + + /* basic functions */ + .read_status = lan87xx_read_status, +- .config_init = smsc_phy_config_init, ++ .config_init = lan87xx_phy_config_init, + .soft_reset = smsc_phy_reset, + .config_aneg = lan87xx_config_aneg, + +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index dc84d9029c2c79..3976bc4295dd19 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1432,6 +1432,7 @@ static const struct usb_device_id products[] = { + {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */ + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ + {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ ++ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9071, 3)}, /* SIMCom 8230C ++ */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ +diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +index 5d534e15a844f7..278875c02f41f8 100644 +--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c ++++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +@@ -583,7 +583,11 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error) + + skb_queue_tail(q, skb); + while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) { +- zd_mac_tx_status(hw, skb_dequeue(q), ++ skb = skb_dequeue(q); ++ if (!skb) ++ break; ++ ++ zd_mac_tx_status(hw, skb, + mac->ack_pending ? 
mac->ack_signal : 0, + NULL); + mac->ack_pending = 0; +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c +index 75bff325a42519..ba38173d3ed3c5 100644 +--- a/drivers/pinctrl/pinctrl-amd.c ++++ b/drivers/pinctrl/pinctrl-amd.c +@@ -933,6 +933,17 @@ static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend + pin, is_suspend ? "suspend" : "hibernate"); + } + ++ /* ++ * debounce enabled over suspend has shown issues with a GPIO ++ * being unable to wake the system, as we're only interested in ++ * the actual wakeup event, clear it. ++ */ ++ if (gpio_dev->saved_regs[i] & (DB_CNTRl_MASK << DB_CNTRL_OFF)) { ++ amd_gpio_set_debounce(gpio_dev, pin, 0); ++ pm_pr_dbg("Clearing debounce for GPIO #%d during %s.\n", ++ pin, is_suspend ? "suspend" : "hibernate"); ++ } ++ + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); + } + +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index ed70767ca0f0c9..13dc8bc1d0cff2 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -1031,6 +1031,25 @@ static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d, + test_bit(d->hwirq, pctrl->skip_wake_irqs); + } + ++static void msm_gpio_irq_init_valid_mask(struct gpio_chip *gc, ++ unsigned long *valid_mask, ++ unsigned int ngpios) ++{ ++ struct msm_pinctrl *pctrl = gpiochip_get_data(gc); ++ const struct msm_pingroup *g; ++ int i; ++ ++ bitmap_fill(valid_mask, ngpios); ++ ++ for (i = 0; i < ngpios; i++) { ++ g = &pctrl->soc->groups[i]; ++ ++ if (g->intr_detection_width != 1 && ++ g->intr_detection_width != 2) ++ clear_bit(i, valid_mask); ++ } ++} ++ + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) + { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); +@@ -1392,6 +1411,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + girq->parents[0] = pctrl->irq; ++ girq->init_valid_mask = msm_gpio_irq_init_valid_mask; + + ret = gpiochip_add_data(&pctrl->chip, pctrl); + if (ret) { +diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c +index 6b1a75b6bd12fb..ff7c70a0033d8a 100644 +--- a/drivers/pwm/pwm-mediatek.c ++++ b/drivers/pwm/pwm-mediatek.c +@@ -133,8 +133,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, + return ret; + + clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]); +- if (!clk_rate) +- return -EINVAL; ++ if (!clk_rate) { ++ ret = -EINVAL; ++ goto out; ++ } + + /* Make sure we use the bus clock and not the 26MHz clock */ + if (pc->soc->has_ck_26m_sel) +@@ -153,9 +155,9 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, + } + + if (clkdiv > PWM_CLK_DIV_MAX) { +- pwm_mediatek_clk_disable(chip, pwm); + dev_err(chip->dev, "period of %d ns not supported\n", period_ns); +- return -EINVAL; ++ ret = -EINVAL; ++ goto out; + } + + if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) { +@@ -172,9 +174,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, + pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period); + pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty); + ++out: + pwm_mediatek_clk_disable(chip, pwm); + +- return 0; ++ return ret; + } + + static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index c5ec7306aa7130..60c878ea95f92d 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ 
-4392,6 +4392,7 @@ void do_unblank_screen(int leaving_gfx) + set_palette(vc); + set_cursor(vc); + vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num); ++ notify_update(vc); + } + EXPORT_SYMBOL(do_unblank_screen); + +diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h +index f0ca865cce2a09..86860686d8363e 100644 +--- a/drivers/usb/cdns3/cdnsp-debug.h ++++ b/drivers/usb/cdns3/cdnsp-debug.h +@@ -131,8 +131,6 @@ static inline const char *cdnsp_trb_type_string(u8 type) + return "Endpoint Not ready"; + case TRB_HALT_ENDPOINT: + return "Halt Endpoint"; +- case TRB_FLUSH_ENDPOINT: +- return "FLush Endpoint"; + default: + return "UNKNOWN"; + } +@@ -189,203 +187,203 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0, + + switch (type) { + case TRB_LINK: +- ret = snprintf(str, size, +- "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c", +- field1, field0, GET_INTR_TARGET(field2), +- cdnsp_trb_type_string(type), +- field3 & TRB_IOC ? 'I' : 'i', +- field3 & TRB_CHAIN ? 'C' : 'c', +- field3 & TRB_TC ? 'T' : 't', +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c", ++ field1, field0, GET_INTR_TARGET(field2), ++ cdnsp_trb_type_string(type), ++ field3 & TRB_IOC ? 'I' : 'i', ++ field3 & TRB_CHAIN ? 'C' : 'c', ++ field3 & TRB_TC ? 'T' : 't', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_TRANSFER: + case TRB_COMPLETION: + case TRB_PORT_STATUS: + case TRB_HC_EVENT: +- ret = snprintf(str, size, +- "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'" +- " len %ld slot %ld flags %c:%c", +- ep_num, ep_id % 2 ? "out" : "in", +- TRB_TO_EP_INDEX(field3), +- cdnsp_trb_type_string(type), field1, field0, +- cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)), +- EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), +- field3 & EVENT_DATA ? 'E' : 'e', +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'" ++ " len %ld slot %ld flags %c:%c", ++ ep_num, ep_id % 2 ? "out" : "in", ++ TRB_TO_EP_INDEX(field3), ++ cdnsp_trb_type_string(type), field1, field0, ++ cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)), ++ EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3), ++ field3 & EVENT_DATA ? 'E' : 'e', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_MFINDEX_WRAP: +- ret = snprintf(str, size, "%s: flags %c", +- cdnsp_trb_type_string(type), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, "%s: flags %c", ++ cdnsp_trb_type_string(type), ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SETUP: +- ret = snprintf(str, size, +- "type '%s' bRequestType %02x bRequest %02x " +- "wValue %02x%02x wIndex %02x%02x wLength %d " +- "length %ld TD size %ld intr %ld Setup ID %ld " +- "flags %c:%c:%c", +- cdnsp_trb_type_string(type), +- field0 & 0xff, +- (field0 & 0xff00) >> 8, +- (field0 & 0xff000000) >> 24, +- (field0 & 0xff0000) >> 16, +- (field1 & 0xff00) >> 8, +- field1 & 0xff, +- (field1 & 0xff000000) >> 16 | +- (field1 & 0xff0000) >> 16, +- TRB_LEN(field2), GET_TD_SIZE(field2), +- GET_INTR_TARGET(field2), +- TRB_SETUPID_TO_TYPE(field3), +- field3 & TRB_IDT ? 'D' : 'd', +- field3 & TRB_IOC ? 'I' : 'i', +- field3 & TRB_CYCLE ? 
'C' : 'c'); ++ ret = scnprintf(str, size, ++ "type '%s' bRequestType %02x bRequest %02x " ++ "wValue %02x%02x wIndex %02x%02x wLength %d " ++ "length %ld TD size %ld intr %ld Setup ID %ld " ++ "flags %c:%c:%c", ++ cdnsp_trb_type_string(type), ++ field0 & 0xff, ++ (field0 & 0xff00) >> 8, ++ (field0 & 0xff000000) >> 24, ++ (field0 & 0xff0000) >> 16, ++ (field1 & 0xff00) >> 8, ++ field1 & 0xff, ++ (field1 & 0xff000000) >> 16 | ++ (field1 & 0xff0000) >> 16, ++ TRB_LEN(field2), GET_TD_SIZE(field2), ++ GET_INTR_TARGET(field2), ++ TRB_SETUPID_TO_TYPE(field3), ++ field3 & TRB_IDT ? 'D' : 'd', ++ field3 & TRB_IOC ? 'I' : 'i', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_DATA: +- ret = snprintf(str, size, +- "type '%s' Buffer %08x%08x length %ld TD size %ld " +- "intr %ld flags %c:%c:%c:%c:%c:%c:%c", +- cdnsp_trb_type_string(type), +- field1, field0, TRB_LEN(field2), +- GET_TD_SIZE(field2), +- GET_INTR_TARGET(field2), +- field3 & TRB_IDT ? 'D' : 'i', +- field3 & TRB_IOC ? 'I' : 'i', +- field3 & TRB_CHAIN ? 'C' : 'c', +- field3 & TRB_NO_SNOOP ? 'S' : 's', +- field3 & TRB_ISP ? 'I' : 'i', +- field3 & TRB_ENT ? 'E' : 'e', +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "type '%s' Buffer %08x%08x length %ld TD size %ld " ++ "intr %ld flags %c:%c:%c:%c:%c:%c:%c", ++ cdnsp_trb_type_string(type), ++ field1, field0, TRB_LEN(field2), ++ GET_TD_SIZE(field2), ++ GET_INTR_TARGET(field2), ++ field3 & TRB_IDT ? 'D' : 'i', ++ field3 & TRB_IOC ? 'I' : 'i', ++ field3 & TRB_CHAIN ? 'C' : 'c', ++ field3 & TRB_NO_SNOOP ? 'S' : 's', ++ field3 & TRB_ISP ? 'I' : 'i', ++ field3 & TRB_ENT ? 'E' : 'e', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_STATUS: +- ret = snprintf(str, size, +- "Buffer %08x%08x length %ld TD size %ld intr" +- "%ld type '%s' flags %c:%c:%c:%c", +- field1, field0, TRB_LEN(field2), +- GET_TD_SIZE(field2), +- GET_INTR_TARGET(field2), +- cdnsp_trb_type_string(type), +- field3 & TRB_IOC ? 'I' : 'i', +- field3 & TRB_CHAIN ? 'C' : 'c', +- field3 & TRB_ENT ? 'E' : 'e', +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "Buffer %08x%08x length %ld TD size %ld intr" ++ "%ld type '%s' flags %c:%c:%c:%c", ++ field1, field0, TRB_LEN(field2), ++ GET_TD_SIZE(field2), ++ GET_INTR_TARGET(field2), ++ cdnsp_trb_type_string(type), ++ field3 & TRB_IOC ? 'I' : 'i', ++ field3 & TRB_CHAIN ? 'C' : 'c', ++ field3 & TRB_ENT ? 'E' : 'e', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_NORMAL: + case TRB_ISOC: + case TRB_EVENT_DATA: + case TRB_TR_NOOP: +- ret = snprintf(str, size, +- "type '%s' Buffer %08x%08x length %ld " +- "TD size %ld intr %ld " +- "flags %c:%c:%c:%c:%c:%c:%c:%c:%c", +- cdnsp_trb_type_string(type), +- field1, field0, TRB_LEN(field2), +- GET_TD_SIZE(field2), +- GET_INTR_TARGET(field2), +- field3 & TRB_BEI ? 'B' : 'b', +- field3 & TRB_IDT ? 'T' : 't', +- field3 & TRB_IOC ? 'I' : 'i', +- field3 & TRB_CHAIN ? 'C' : 'c', +- field3 & TRB_NO_SNOOP ? 'S' : 's', +- field3 & TRB_ISP ? 'I' : 'i', +- field3 & TRB_ENT ? 'E' : 'e', +- field3 & TRB_CYCLE ? 'C' : 'c', +- !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v'); ++ ret = scnprintf(str, size, ++ "type '%s' Buffer %08x%08x length %ld " ++ "TD size %ld intr %ld " ++ "flags %c:%c:%c:%c:%c:%c:%c:%c:%c", ++ cdnsp_trb_type_string(type), ++ field1, field0, TRB_LEN(field2), ++ GET_TD_SIZE(field2), ++ GET_INTR_TARGET(field2), ++ field3 & TRB_BEI ? 'B' : 'b', ++ field3 & TRB_IDT ? 'T' : 't', ++ field3 & TRB_IOC ? 'I' : 'i', ++ field3 & TRB_CHAIN ? 'C' : 'c', ++ field3 & TRB_NO_SNOOP ? 
'S' : 's', ++ field3 & TRB_ISP ? 'I' : 'i', ++ field3 & TRB_ENT ? 'E' : 'e', ++ field3 & TRB_CYCLE ? 'C' : 'c', ++ !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v'); + break; + case TRB_CMD_NOOP: + case TRB_ENABLE_SLOT: +- ret = snprintf(str, size, "%s: flags %c", +- cdnsp_trb_type_string(type), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, "%s: flags %c", ++ cdnsp_trb_type_string(type), ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_DISABLE_SLOT: +- ret = snprintf(str, size, "%s: slot %ld flags %c", +- cdnsp_trb_type_string(type), +- TRB_TO_SLOT_ID(field3), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, "%s: slot %ld flags %c", ++ cdnsp_trb_type_string(type), ++ TRB_TO_SLOT_ID(field3), ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_ADDR_DEV: +- ret = snprintf(str, size, +- "%s: ctx %08x%08x slot %ld flags %c:%c", +- cdnsp_trb_type_string(type), field1, field0, +- TRB_TO_SLOT_ID(field3), +- field3 & TRB_BSR ? 'B' : 'b', +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "%s: ctx %08x%08x slot %ld flags %c:%c", ++ cdnsp_trb_type_string(type), field1, field0, ++ TRB_TO_SLOT_ID(field3), ++ field3 & TRB_BSR ? 'B' : 'b', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_CONFIG_EP: +- ret = snprintf(str, size, +- "%s: ctx %08x%08x slot %ld flags %c:%c", +- cdnsp_trb_type_string(type), field1, field0, +- TRB_TO_SLOT_ID(field3), +- field3 & TRB_DC ? 'D' : 'd', +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "%s: ctx %08x%08x slot %ld flags %c:%c", ++ cdnsp_trb_type_string(type), field1, field0, ++ TRB_TO_SLOT_ID(field3), ++ field3 & TRB_DC ? 'D' : 'd', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_EVAL_CONTEXT: +- ret = snprintf(str, size, +- "%s: ctx %08x%08x slot %ld flags %c", +- cdnsp_trb_type_string(type), field1, field0, +- TRB_TO_SLOT_ID(field3), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "%s: ctx %08x%08x slot %ld flags %c", ++ cdnsp_trb_type_string(type), field1, field0, ++ TRB_TO_SLOT_ID(field3), ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_EP: + case TRB_HALT_ENDPOINT: +- case TRB_FLUSH_ENDPOINT: +- ret = snprintf(str, size, +- "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c", +- cdnsp_trb_type_string(type), +- ep_num, ep_id % 2 ? "out" : "in", +- TRB_TO_EP_INDEX(field3), field1, field0, +- TRB_TO_SLOT_ID(field3), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c %c", ++ cdnsp_trb_type_string(type), ++ ep_num, ep_id % 2 ? "out" : "in", ++ TRB_TO_EP_INDEX(field3), field1, field0, ++ TRB_TO_SLOT_ID(field3), ++ field3 & TRB_CYCLE ? 'C' : 'c', ++ field3 & TRB_ESP ? 'P' : 'p'); + break; + case TRB_STOP_RING: +- ret = snprintf(str, size, +- "%s: ep%d%s(%d) slot %ld sp %d flags %c", +- cdnsp_trb_type_string(type), +- ep_num, ep_id % 2 ? "out" : "in", +- TRB_TO_EP_INDEX(field3), +- TRB_TO_SLOT_ID(field3), +- TRB_TO_SUSPEND_PORT(field3), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "%s: ep%d%s(%d) slot %ld sp %d flags %c", ++ cdnsp_trb_type_string(type), ++ ep_num, ep_id % 2 ? "out" : "in", ++ TRB_TO_EP_INDEX(field3), ++ TRB_TO_SLOT_ID(field3), ++ TRB_TO_SUSPEND_PORT(field3), ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_SET_DEQ: +- ret = snprintf(str, size, +- "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c", +- cdnsp_trb_type_string(type), +- ep_num, ep_id % 2 ? 
"out" : "in", +- TRB_TO_EP_INDEX(field3), field1, field0, +- TRB_TO_STREAM_ID(field2), +- TRB_TO_SLOT_ID(field3), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c", ++ cdnsp_trb_type_string(type), ++ ep_num, ep_id % 2 ? "out" : "in", ++ TRB_TO_EP_INDEX(field3), field1, field0, ++ TRB_TO_STREAM_ID(field2), ++ TRB_TO_SLOT_ID(field3), ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_RESET_DEV: +- ret = snprintf(str, size, "%s: slot %ld flags %c", +- cdnsp_trb_type_string(type), +- TRB_TO_SLOT_ID(field3), +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, "%s: slot %ld flags %c", ++ cdnsp_trb_type_string(type), ++ TRB_TO_SLOT_ID(field3), ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + case TRB_ENDPOINT_NRDY: + temp = TRB_TO_HOST_STREAM(field2); + +- ret = snprintf(str, size, +- "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c", +- cdnsp_trb_type_string(type), +- ep_num, ep_id % 2 ? "out" : "in", +- TRB_TO_EP_INDEX(field3), temp, +- temp == STREAM_PRIME_ACK ? "(PRIME)" : "", +- temp == STREAM_REJECTED ? "(REJECTED)" : "", +- TRB_TO_DEV_STREAM(field0), +- field3 & TRB_STAT ? 'S' : 's', +- field3 & TRB_CYCLE ? 'C' : 'c'); ++ ret = scnprintf(str, size, ++ "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c", ++ cdnsp_trb_type_string(type), ++ ep_num, ep_id % 2 ? "out" : "in", ++ TRB_TO_EP_INDEX(field3), temp, ++ temp == STREAM_PRIME_ACK ? "(PRIME)" : "", ++ temp == STREAM_REJECTED ? "(REJECTED)" : "", ++ TRB_TO_DEV_STREAM(field0), ++ field3 & TRB_STAT ? 'S' : 's', ++ field3 & TRB_CYCLE ? 'C' : 'c'); + break; + default: +- ret = snprintf(str, size, +- "type '%s' -> raw %08x %08x %08x %08x", +- cdnsp_trb_type_string(type), +- field0, field1, field2, field3); ++ ret = scnprintf(str, size, ++ "type '%s' -> raw %08x %08x %08x %08x", ++ cdnsp_trb_type_string(type), ++ field0, field1, field2, field3); + } + +- if (ret >= size) +- pr_info("CDNSP: buffer overflowed.\n"); ++ if (ret == size - 1) ++ pr_info("CDNSP: buffer may be truncated.\n"); + + return str; + } +@@ -468,32 +466,32 @@ static inline const char *cdnsp_decode_portsc(char *str, size_t size, + { + int ret; + +- ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ", +- portsc & PORT_POWER ? "Powered" : "Powered-off", +- portsc & PORT_CONNECT ? "Connected" : "Not-connected", +- portsc & PORT_PED ? "Enabled" : "Disabled", +- cdnsp_portsc_link_state_string(portsc), +- DEV_PORT_SPEED(portsc)); ++ ret = scnprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ", ++ portsc & PORT_POWER ? "Powered" : "Powered-off", ++ portsc & PORT_CONNECT ? "Connected" : "Not-connected", ++ portsc & PORT_PED ? 
"Enabled" : "Disabled", ++ cdnsp_portsc_link_state_string(portsc), ++ DEV_PORT_SPEED(portsc)); + + if (portsc & PORT_RESET) +- ret += snprintf(str + ret, size - ret, "In-Reset "); ++ ret += scnprintf(str + ret, size - ret, "In-Reset "); + +- ret += snprintf(str + ret, size - ret, "Change: "); ++ ret += scnprintf(str + ret, size - ret, "Change: "); + if (portsc & PORT_CSC) +- ret += snprintf(str + ret, size - ret, "CSC "); ++ ret += scnprintf(str + ret, size - ret, "CSC "); + if (portsc & PORT_WRC) +- ret += snprintf(str + ret, size - ret, "WRC "); ++ ret += scnprintf(str + ret, size - ret, "WRC "); + if (portsc & PORT_RC) +- ret += snprintf(str + ret, size - ret, "PRC "); ++ ret += scnprintf(str + ret, size - ret, "PRC "); + if (portsc & PORT_PLC) +- ret += snprintf(str + ret, size - ret, "PLC "); ++ ret += scnprintf(str + ret, size - ret, "PLC "); + if (portsc & PORT_CEC) +- ret += snprintf(str + ret, size - ret, "CEC "); +- ret += snprintf(str + ret, size - ret, "Wake: "); ++ ret += scnprintf(str + ret, size - ret, "CEC "); ++ ret += scnprintf(str + ret, size - ret, "Wake: "); + if (portsc & PORT_WKCONN_E) +- ret += snprintf(str + ret, size - ret, "WCE "); ++ ret += scnprintf(str + ret, size - ret, "WCE "); + if (portsc & PORT_WKDISC_E) +- ret += snprintf(str + ret, size - ret, "WDE "); ++ ret += scnprintf(str + ret, size - ret, "WDE "); + + return str; + } +@@ -565,20 +563,20 @@ static inline const char *cdnsp_decode_ep_context(char *str, size_t size, + + avg = EP_AVG_TRB_LENGTH(tx_info); + +- ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s", +- cdnsp_ep_state_string(ep_state), mult, +- max_pstr, lsa ? "LSA " : ""); ++ ret = scnprintf(str, size, "State %s mult %d max P. Streams %d %s", ++ cdnsp_ep_state_string(ep_state), mult, ++ max_pstr, lsa ? "LSA " : ""); + +- ret += snprintf(str + ret, size - ret, +- "interval %d us max ESIT payload %d CErr %d ", +- (1 << interval) * 125, esit, cerr); ++ ret += scnprintf(str + ret, size - ret, ++ "interval %d us max ESIT payload %d CErr %d ", ++ (1 << interval) * 125, esit, cerr); + +- ret += snprintf(str + ret, size - ret, +- "Type %s %sburst %d maxp %d deq %016llx ", +- cdnsp_ep_type_string(ep_type), hid ? "HID" : "", +- burst, maxp, deq); ++ ret += scnprintf(str + ret, size - ret, ++ "Type %s %sburst %d maxp %d deq %016llx ", ++ cdnsp_ep_type_string(ep_type), hid ? "HID" : "", ++ burst, maxp, deq); + +- ret += snprintf(str + ret, size - ret, "avg trb len %d", avg); ++ ret += scnprintf(str + ret, size - ret, "avg trb len %d", avg); + + return str; + } +diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c +index f317d3c8478108..5cd9b898ce971f 100644 +--- a/drivers/usb/cdns3/cdnsp-ep0.c ++++ b/drivers/usb/cdns3/cdnsp-ep0.c +@@ -414,6 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev, + void cdnsp_setup_analyze(struct cdnsp_device *pdev) + { + struct usb_ctrlrequest *ctrl = &pdev->setup; ++ struct cdnsp_ep *pep; + int ret = -EINVAL; + u16 len; + +@@ -427,10 +428,21 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev) + goto out; + } + ++ pep = &pdev->eps[0]; ++ + /* Restore the ep0 to Stopped/Running state. 
*/ +- if (pdev->eps[0].ep_state & EP_HALTED) { +- trace_cdnsp_ep0_halted("Restore to normal state"); +- cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0); ++ if (pep->ep_state & EP_HALTED) { ++ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_HALTED) ++ cdnsp_halt_endpoint(pdev, pep, 0); ++ ++ /* ++ * Halt Endpoint Command for SSP2 for ep0 preserve current ++ * endpoint state and driver has to synchronize the ++ * software endpoint state with endpoint output context ++ * state. ++ */ ++ pep->ep_state &= ~EP_HALTED; ++ pep->ep_state |= EP_STOPPED; + } + + /* +diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c +index 132885fbb98f67..38e693cd3efc05 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.c ++++ b/drivers/usb/cdns3/cdnsp-gadget.c +@@ -1061,10 +1061,8 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep) + pep->ep_state |= EP_DIS_IN_RROGRESS; + + /* Endpoint was unconfigured by Reset Device command. */ +- if (!(pep->ep_state & EP_UNCONFIGURED)) { ++ if (!(pep->ep_state & EP_UNCONFIGURED)) + cdnsp_cmd_stop_ep(pdev, pep); +- cdnsp_cmd_flush_ep(pdev, pep); +- } + + /* Remove all queued USB requests. */ + while (!list_empty(&pep->pending_list)) { +@@ -1461,8 +1459,6 @@ static void cdnsp_stop(struct cdnsp_device *pdev) + { + u32 temp; + +- cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]); +- + /* Remove internally queued request for ep0. */ + if (!list_empty(&pdev->eps[0].pending_list)) { + struct cdnsp_request *req; +diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h +index 909cee01772a70..a91cca509db080 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.h ++++ b/drivers/usb/cdns3/cdnsp-gadget.h +@@ -987,6 +987,12 @@ enum cdnsp_setup_dev { + #define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16)) + #define SCT_FOR_TRB(p) (((p) << 1) & 0x7) + ++/* ++ * Halt Endpoint Command TRB field. ++ * The ESP bit only exists in the SSP2 controller. ++ */ ++#define TRB_ESP BIT(9) ++ + /* Link TRB specific fields. */ + #define TRB_TC BIT(1) + +@@ -1138,8 +1144,6 @@ union cdnsp_trb { + #define TRB_HALT_ENDPOINT 54 + /* Doorbell Overflow Event. */ + #define TRB_DRB_OVERFLOW 57 +-/* Flush Endpoint Command. 
*/ +-#define TRB_FLUSH_ENDPOINT 58 + + #define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) + #define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ +@@ -1552,8 +1556,6 @@ void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev, + void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index); + void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, + unsigned int ep_index); +-void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev, +- unsigned int ep_index); + void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num); + void cdnsp_queue_reset_device(struct cdnsp_device *pdev); + void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev, +@@ -1587,7 +1589,6 @@ void cdnsp_irq_reset(struct cdnsp_device *pdev); + int cdnsp_halt_endpoint(struct cdnsp_device *pdev, + struct cdnsp_ep *pep, int value); + int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep); +-int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep); + void cdnsp_setup_analyze(struct cdnsp_device *pdev); + int cdnsp_status_stage(struct cdnsp_device *pdev); + int cdnsp_reset_device(struct cdnsp_device *pdev); +diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c +index 3b17d9e4b07d8c..0758f171f73ecf 100644 +--- a/drivers/usb/cdns3/cdnsp-ring.c ++++ b/drivers/usb/cdns3/cdnsp-ring.c +@@ -2159,19 +2159,6 @@ int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep) + return ret; + } + +-int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep) +-{ +- int ret; +- +- cdnsp_queue_flush_endpoint(pdev, pep->idx); +- cdnsp_ring_cmd_db(pdev); +- ret = cdnsp_wait_for_cmd_compl(pdev); +- +- trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx); +- +- return ret; +-} +- + /* + * The transfer burst count field of the isochronous TRB defines the number of + * bursts that are required to move all packets in this TD. Only SuperSpeed +@@ -2498,18 +2485,8 @@ void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index) + { + cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) | + SLOT_ID_FOR_TRB(pdev->slot_id) | +- EP_ID_FOR_TRB(ep_index)); +-} +- +-/* +- * Queue a flush endpoint request on the command ring. +- */ +-void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev, +- unsigned int ep_index) +-{ +- cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) | +- SLOT_ID_FOR_TRB(pdev->slot_id) | +- EP_ID_FOR_TRB(ep_index)); ++ EP_ID_FOR_TRB(ep_index) | ++ (!ep_index ? 
TRB_ESP : 0)); + } + + void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num) +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 30404461ef7de3..b7eaad099309c5 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -2128,6 +2128,7 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc) + static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) + { + u32 reg; ++ int ret; + + if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { + dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & +@@ -2146,7 +2147,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) + case DWC3_GCTL_PRTCAP_DEVICE: + if (pm_runtime_suspended(dwc->dev)) + break; +- dwc3_gadget_suspend(dwc); ++ ret = dwc3_gadget_suspend(dwc); ++ if (ret) ++ return ret; + synchronize_irq(dwc->irq_gadget); + dwc3_core_exit(dwc); + break; +@@ -2177,7 +2180,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) + break; + + if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { +- dwc3_gadget_suspend(dwc); ++ ret = dwc3_gadget_suspend(dwc); ++ if (ret) ++ return ret; + synchronize_irq(dwc->irq_gadget); + } + +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index f51d743bb3ecc6..a17af4ab20a323 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -4802,26 +4802,22 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) + int ret; + + ret = dwc3_gadget_soft_disconnect(dwc); +- if (ret) +- goto err; +- +- spin_lock_irqsave(&dwc->lock, flags); +- if (dwc->gadget_driver) +- dwc3_disconnect_gadget(dwc); +- spin_unlock_irqrestore(&dwc->lock, flags); +- +- return 0; +- +-err: + /* + * Attempt to reset the controller's state. Likely no + * communication can be established until the host + * performs a port reset. + */ +- if (dwc->softconnect) ++ if (ret && dwc->softconnect) { + dwc3_gadget_soft_connect(dwc); ++ return -EAGAIN; ++ } + +- return ret; ++ spin_lock_irqsave(&dwc->lock, flags); ++ if (dwc->gadget_driver) ++ dwc3_disconnect_gadget(dwc); ++ spin_unlock_irqrestore(&dwc->lock, flags); ++ ++ return 0; + } + + int dwc3_gadget_resume(struct dwc3 *dwc) +diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c +index 729b0472bab098..4925c874316cb2 100644 +--- a/drivers/usb/gadget/function/u_serial.c ++++ b/drivers/usb/gadget/function/u_serial.c +@@ -291,8 +291,8 @@ __acquires(&port->port_lock) + break; + } + +- if (do_tty_wake && port->port.tty) +- tty_wakeup(port->port.tty); ++ if (do_tty_wake) ++ tty_port_tty_wakeup(&port->port); + return status; + } + +@@ -539,20 +539,16 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, + static int gs_start_io(struct gs_port *port) + { + struct list_head *head = &port->read_pool; +- struct usb_ep *ep; ++ struct usb_ep *ep = port->port_usb->out; + int status; + unsigned started; + +- if (!port->port_usb || !port->port.tty) +- return -EIO; +- + /* Allocate RX and TX I/O buffers. We can't easily do this much + * earlier (with GFP_KERNEL) because the requests are coupled to + * endpoints, as are the packet sizes we'll be using. Different + * configurations may use different endpoints with a given port; + * and high speed vs full speed changes packet sizes too. 
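+ *
+ * [Editor's note: this is why the allocation lives here in gs_start_io()
+ * rather than at open() time: only once the gadget is configured is the
+ * endpoint known, and with it the packet size. Illustrative shape of
+ * each request allocation (simplified, error handling omitted):
+ *
+ *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *	req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC);
+ * ]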
+ */ +- ep = port->port_usb->out; + status = gs_alloc_requests(ep, head, gs_read_complete, + &port->read_allocated); + if (status) +@@ -573,7 +569,7 @@ static int gs_start_io(struct gs_port *port) + gs_start_tx(port); + /* Unblock any pending writes into our circular buffer, in case + * we didn't in gs_start_tx() */ +- tty_wakeup(port->port.tty); ++ tty_port_tty_wakeup(&port->port); + } else { + /* Free reqs only if we are still connected */ + if (port->port_usb) { +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h +index ec6679a538c1dc..c23c56ead6b236 100644 +--- a/fs/btrfs/btrfs_inode.h ++++ b/fs/btrfs/btrfs_inode.h +@@ -488,7 +488,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + int btrfs_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_inode *inode); + int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, struct btrfs_inode *inode); ++ struct btrfs_inode *inode); + int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct btrfs_inode *inode); + int btrfs_orphan_cleanup(struct btrfs_root *root); + int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size); +diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c +index a0d8160b537572..300ee0b68b498e 100644 +--- a/fs/btrfs/free-space-tree.c ++++ b/fs/btrfs/free-space-tree.c +@@ -1104,11 +1104,21 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, + ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0); + if (ret < 0) + goto out_locked; +- ASSERT(ret == 0); ++ /* ++ * If ret is 1 (no key found), it means this is an empty block group, ++ * without any extents allocated from it and there's no block group ++ * item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree ++ * because we are using the block group tree feature, so block group ++ * items are stored in the block group tree. It also means there are no ++ * extents allocated for block groups with a start offset beyond this ++ * block group's end offset (this is the last, highest, block group). 
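++ *
++ * [Editor's worked example: on a block-group-tree fs, searching inside
++ * an empty, last block group finds no key at all, so ret == 1 here.
++ * The "while (ret == 0)" loop below is then skipped entirely, "start"
++ * and "end" still delimit the whole group, and the trailing
++ * "if (start < end)" adds the full range as a single free-space
++ * extent, which is exactly right for an empty block group.]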
++ */ ++ if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE)) ++ ASSERT(ret == 0); + + start = block_group->start; + end = block_group->start + block_group->length; +- while (1) { ++ while (ret == 0) { + btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); + + if (key.type == BTRFS_EXTENT_ITEM_KEY || +@@ -1138,8 +1148,6 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans, + ret = btrfs_next_item(extent_root, path); + if (ret < 0) + goto out_locked; +- if (ret) +- break; + } + if (start < end) { + ret = __add_to_free_space_tree(trans, block_group, path2, +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index e8e57abb032d7a..218d15f5ddf737 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3077,7 +3077,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) + goto out; + } + trans->block_rsv = &inode->block_rsv; +- ret = btrfs_update_inode_fallback(trans, root, inode); ++ ret = btrfs_update_inode_fallback(trans, inode); + if (ret) /* -ENOMEM or corruption */ + btrfs_abort_transaction(trans, ret); + goto out; +@@ -3143,7 +3143,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) + &cached_state); + + btrfs_inode_safe_disk_i_size_write(inode, 0); +- ret = btrfs_update_inode_fallback(trans, root, inode); ++ ret = btrfs_update_inode_fallback(trans, inode); + if (ret) { /* -ENOMEM or corruption */ + btrfs_abort_transaction(trans, ret); + goto out; +@@ -4014,9 +4014,9 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, + /* + * copy everything in the in-memory inode into the btree. + */ +-noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, +- struct btrfs_inode *inode) ++int btrfs_update_inode(struct btrfs_trans_handle *trans, ++ struct btrfs_root *root, ++ struct btrfs_inode *inode) + { + struct btrfs_fs_info *fs_info = root->fs_info; + int ret; +@@ -4043,13 +4043,13 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, + } + + int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, struct btrfs_inode *inode) ++ struct btrfs_inode *inode) + { + int ret; + +- ret = btrfs_update_inode(trans, root, inode); ++ ret = btrfs_update_inode(trans, inode->root, inode); + if (ret == -ENOSPC) +- return btrfs_update_inode_item(trans, root, inode); ++ return btrfs_update_inode_item(trans, inode->root, inode); + return ret; + } + +@@ -4327,7 +4327,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); + inode_inc_iversion(&dir->vfs_inode); + dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); +- ret = btrfs_update_inode_fallback(trans, root, dir); ++ ret = btrfs_update_inode_fallback(trans, dir); + if (ret) + btrfs_abort_transaction(trans, ret); + out: +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index aa03db69a0164c..3989cb19cdae70 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -1860,7 +1860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size + + fname.disk_name.len * 2); + parent_inode->i_mtime = inode_set_ctime_current(parent_inode); +- ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode)); ++ ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode)); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto fail; +diff --git 
a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 13377c3b22897d..16434106c465db 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -140,11 +140,14 @@ static void wait_log_commit(struct btrfs_root *root, int transid); + * and once to do all the other items. + */ + +-static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root) ++static struct btrfs_inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root) + { + unsigned int nofs_flag; + struct inode *inode; + ++ /* Only meant to be called for subvolume roots and not for log roots. */ ++ ASSERT(is_fstree(btrfs_root_id(root))); ++ + /* + * We're holding a transaction handle whether we are logging or + * replaying a log tree, so we must make sure NOFS semantics apply +@@ -156,7 +159,10 @@ static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root) + inode = btrfs_iget(root->fs_info->sb, objectid, root); + memalloc_nofs_restore(nofs_flag); + +- return inode; ++ if (IS_ERR(inode)) ++ return ERR_CAST(inode); ++ ++ return BTRFS_I(inode); + } + + /* +@@ -613,20 +619,6 @@ static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len, + return 0; + } + +-/* +- * simple helper to read an inode off the disk from a given root +- * This can only be called for subvolume roots and not for the log +- */ +-static noinline struct inode *read_one_inode(struct btrfs_root *root, +- u64 objectid) +-{ +- struct inode *inode; +- +- inode = btrfs_iget_logging(objectid, root); +- if (IS_ERR(inode)) +- inode = NULL; +- return inode; +-} + + /* replays a single extent in 'eb' at 'slot' with 'key' into the + * subvolume 'root'. path is released on entry and should be released +@@ -681,10 +673,15 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, + goto out; + } + +- inode = read_one_inode(root, key->objectid); +- if (!inode) { +- ret = -EIO; +- goto out; ++ { ++ struct btrfs_inode *btrfs_inode; ++ ++ btrfs_inode = btrfs_iget_logging(key->objectid, root); ++ if (IS_ERR(btrfs_inode)) { ++ ret = PTR_ERR(btrfs_inode); ++ goto out; ++ } ++ inode = &btrfs_inode->vfs_inode; + } + + /* +@@ -963,10 +960,16 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, + + btrfs_release_path(path); + +- inode = read_one_inode(root, location.objectid); +- if (!inode) { +- ret = -EIO; +- goto out; ++ { ++ struct btrfs_inode *btrfs_inode; ++ ++ btrfs_inode = btrfs_iget_logging(location.objectid, root); ++ if (IS_ERR(btrfs_inode)) { ++ ret = PTR_ERR(btrfs_inode); ++ inode = NULL; ++ goto out; ++ } ++ inode = &btrfs_inode->vfs_inode; + } + + ret = link_to_fixup_dir(trans, root, path, location.objectid); +@@ -1183,18 +1186,21 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, + kfree(victim_name.name); + return ret; + } else if (!ret) { +- ret = -ENOENT; +- victim_parent = read_one_inode(root, +- parent_objectid); +- if (victim_parent) { ++ struct btrfs_inode *btrfs_victim; ++ ++ btrfs_victim = btrfs_iget_logging(parent_objectid, root); ++ if (IS_ERR(btrfs_victim)) { ++ ret = PTR_ERR(btrfs_victim); ++ } else { ++ victim_parent = &btrfs_victim->vfs_inode; + inc_nlink(&inode->vfs_inode); + btrfs_release_path(path); + + ret = unlink_inode_for_log_replay(trans, + BTRFS_I(victim_parent), + inode, &victim_name); ++ iput(victim_parent); + } +- iput(victim_parent); + kfree(victim_name.name); + if (ret) + return ret; +@@ -1331,11 +1337,16 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, + struct inode *dir; + + btrfs_release_path(path); +- dir = 
read_one_inode(root, parent_id); +- if (!dir) { +- ret = -ENOENT; +- kfree(name.name); +- goto out; ++ { ++ struct btrfs_inode *btrfs_dir; ++ ++ btrfs_dir = btrfs_iget_logging(parent_id, root); ++ if (IS_ERR(btrfs_dir)) { ++ ret = PTR_ERR(btrfs_dir); ++ kfree(name.name); ++ goto out; ++ } ++ dir = &btrfs_dir->vfs_inode; + } + ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), + inode, &name); +@@ -1406,16 +1417,28 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + * copy the back ref in. The link count fixup code will take + * care of the rest + */ +- dir = read_one_inode(root, parent_objectid); +- if (!dir) { +- ret = -ENOENT; +- goto out; ++ { ++ struct btrfs_inode *btrfs_dir; ++ ++ btrfs_dir = btrfs_iget_logging(parent_objectid, root); ++ if (IS_ERR(btrfs_dir)) { ++ ret = PTR_ERR(btrfs_dir); ++ dir = NULL; ++ goto out; ++ } ++ dir = &btrfs_dir->vfs_inode; + } + +- inode = read_one_inode(root, inode_objectid); +- if (!inode) { +- ret = -EIO; +- goto out; ++ { ++ struct btrfs_inode *btrfs_inode; ++ ++ btrfs_inode = btrfs_iget_logging(inode_objectid, root); ++ if (IS_ERR(btrfs_inode)) { ++ ret = PTR_ERR(btrfs_inode); ++ inode = NULL; ++ goto out; ++ } ++ inode = &btrfs_inode->vfs_inode; + } + + while (ref_ptr < ref_end) { +@@ -1426,11 +1449,16 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + * parent object can change from one array + * item to another. + */ +- if (!dir) +- dir = read_one_inode(root, parent_objectid); + if (!dir) { +- ret = -ENOENT; +- goto out; ++ struct btrfs_inode *btrfs_dir; ++ ++ btrfs_dir = btrfs_iget_logging(parent_objectid, root); ++ if (IS_ERR(btrfs_dir)) { ++ ret = PTR_ERR(btrfs_dir); ++ dir = NULL; ++ goto out; ++ } ++ dir = &btrfs_dir->vfs_inode; + } + } else { + ret = ref_get_fields(eb, ref_ptr, &name, &ref_index); +@@ -1504,8 +1532,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + return ret; + } + +-static int count_inode_extrefs(struct btrfs_root *root, +- struct btrfs_inode *inode, struct btrfs_path *path) ++static int count_inode_extrefs(struct btrfs_inode *inode, struct btrfs_path *path) + { + int ret = 0; + int name_len; +@@ -1519,8 +1546,8 @@ static int count_inode_extrefs(struct btrfs_root *root, + struct extent_buffer *leaf; + + while (1) { +- ret = btrfs_find_one_extref(root, inode_objectid, offset, path, +- &extref, &offset); ++ ret = btrfs_find_one_extref(inode->root, inode_objectid, offset, ++ path, &extref, &offset); + if (ret) + break; + +@@ -1548,8 +1575,7 @@ static int count_inode_extrefs(struct btrfs_root *root, + return nlink; + } + +-static int count_inode_refs(struct btrfs_root *root, +- struct btrfs_inode *inode, struct btrfs_path *path) ++static int count_inode_refs(struct btrfs_inode *inode, struct btrfs_path *path) + { + int ret; + struct btrfs_key key; +@@ -1564,7 +1590,7 @@ static int count_inode_refs(struct btrfs_root *root, + key.offset = (u64)-1; + + while (1) { +- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); ++ ret = btrfs_search_slot(NULL, inode->root, &key, path, 0, 0); + if (ret < 0) + break; + if (ret > 0) { +@@ -1616,9 +1642,9 @@ static int count_inode_refs(struct btrfs_root *root, + * will free the inode. 
+ */ + static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, +- struct btrfs_root *root, + struct inode *inode) + { ++ struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_path *path; + int ret; + u64 nlink = 0; +@@ -1628,13 +1654,13 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, + if (!path) + return -ENOMEM; + +- ret = count_inode_refs(root, BTRFS_I(inode), path); ++ ret = count_inode_refs(BTRFS_I(inode), path); + if (ret < 0) + goto out; + + nlink = ret; + +- ret = count_inode_extrefs(root, BTRFS_I(inode), path); ++ ret = count_inode_extrefs(BTRFS_I(inode), path); + if (ret < 0) + goto out; + +@@ -1700,13 +1726,18 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, + break; + + btrfs_release_path(path); +- inode = read_one_inode(root, key.offset); +- if (!inode) { +- ret = -EIO; +- break; ++ { ++ struct btrfs_inode *btrfs_inode; ++ ++ btrfs_inode = btrfs_iget_logging(key.offset, root); ++ if (IS_ERR(btrfs_inode)) { ++ ret = PTR_ERR(btrfs_inode); ++ break; ++ } ++ inode = &btrfs_inode->vfs_inode; + } + +- ret = fixup_inode_link_count(trans, root, inode); ++ ret = fixup_inode_link_count(trans, inode); + iput(inode); + if (ret) + break; +@@ -1737,9 +1768,14 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, + int ret = 0; + struct inode *inode; + +- inode = read_one_inode(root, objectid); +- if (!inode) +- return -EIO; ++ { ++ struct btrfs_inode *btrfs_inode; ++ ++ btrfs_inode = btrfs_iget_logging(objectid, root); ++ if (IS_ERR(btrfs_inode)) ++ return PTR_ERR(btrfs_inode); ++ inode = &btrfs_inode->vfs_inode; ++ } + + key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; + key.type = BTRFS_ORPHAN_ITEM_KEY; +@@ -1777,14 +1813,24 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, + struct inode *dir; + int ret; + +- inode = read_one_inode(root, location->objectid); +- if (!inode) +- return -ENOENT; ++ { ++ struct btrfs_inode *btrfs_inode; + +- dir = read_one_inode(root, dirid); +- if (!dir) { +- iput(inode); +- return -EIO; ++ btrfs_inode = btrfs_iget_logging(location->objectid, root); ++ if (IS_ERR(btrfs_inode)) ++ return PTR_ERR(btrfs_inode); ++ inode = &btrfs_inode->vfs_inode; ++ } ++ ++ { ++ struct btrfs_inode *btrfs_dir; ++ ++ btrfs_dir = btrfs_iget_logging(dirid, root); ++ if (IS_ERR(btrfs_dir)) { ++ iput(inode); ++ return PTR_ERR(btrfs_dir); ++ } ++ dir = &btrfs_dir->vfs_inode; + } + + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, +@@ -1862,9 +1908,14 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, + bool update_size = true; + bool name_added = false; + +- dir = read_one_inode(root, key->objectid); +- if (!dir) +- return -EIO; ++ { ++ struct btrfs_inode *btrfs_dir; ++ ++ btrfs_dir = btrfs_iget_logging(key->objectid, root); ++ if (IS_ERR(btrfs_dir)) ++ return PTR_ERR(btrfs_dir); ++ dir = &btrfs_dir->vfs_inode; ++ } + + ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name); + if (ret) +@@ -2166,10 +2217,16 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, + btrfs_dir_item_key_to_cpu(eb, di, &location); + btrfs_release_path(path); + btrfs_release_path(log_path); +- inode = read_one_inode(root, location.objectid); +- if (!inode) { +- ret = -EIO; +- goto out; ++ { ++ struct btrfs_inode *btrfs_inode; ++ ++ btrfs_inode = btrfs_iget_logging(location.objectid, root); ++ if (IS_ERR(btrfs_inode)) { ++ ret = PTR_ERR(btrfs_inode); ++ inode = NULL; ++ goto out; ++ } ++ inode = 
&btrfs_inode->vfs_inode; + } + + ret = link_to_fixup_dir(trans, root, path, location.objectid); +@@ -2320,14 +2377,22 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, + if (!log_path) + return -ENOMEM; + +- dir = read_one_inode(root, dirid); +- /* it isn't an error if the inode isn't there, that can happen +- * because we replay the deletes before we copy in the inode item +- * from the log +- */ +- if (!dir) { +- btrfs_free_path(log_path); +- return 0; ++ { ++ struct btrfs_inode *btrfs_dir; ++ ++ btrfs_dir = btrfs_iget_logging(dirid, root); ++ /* ++ * It isn't an error if the inode isn't there, that can happen because ++ * we replay the deletes before we copy in the inode item from the log. ++ */ ++ if (IS_ERR(btrfs_dir)) { ++ btrfs_free_path(log_path); ++ ret = PTR_ERR(btrfs_dir); ++ if (ret == -ENOENT) ++ ret = 0; ++ return ret; ++ } ++ dir = &btrfs_dir->vfs_inode; + } + + range_start = 0; +@@ -2486,10 +2551,15 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, + struct inode *inode; + u64 from; + +- inode = read_one_inode(root, key.objectid); +- if (!inode) { +- ret = -EIO; +- break; ++ { ++ struct btrfs_inode *btrfs_inode; ++ ++ btrfs_inode = btrfs_iget_logging(key.objectid, root); ++ if (IS_ERR(btrfs_inode)) { ++ ret = PTR_ERR(btrfs_inode); ++ break; ++ } ++ inode = &btrfs_inode->vfs_inode; + } + from = ALIGN(i_size_read(inode), + root->fs_info->sectorsize); +@@ -5421,7 +5491,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, + ihold(&curr_inode->vfs_inode); + + while (true) { +- struct inode *vfs_inode; + struct btrfs_key key; + struct btrfs_key found_key; + u64 next_index; +@@ -5437,7 +5506,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, + struct extent_buffer *leaf = path->nodes[0]; + struct btrfs_dir_item *di; + struct btrfs_key di_key; +- struct inode *di_inode; ++ struct btrfs_inode *di_inode; + int log_mode = LOG_INODE_EXISTS; + int type; + +@@ -5464,17 +5533,16 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, + goto out; + } + +- if (!need_log_inode(trans, BTRFS_I(di_inode))) { +- btrfs_add_delayed_iput(BTRFS_I(di_inode)); ++ if (!need_log_inode(trans, di_inode)) { ++ btrfs_add_delayed_iput(di_inode); + break; + } + + ctx->log_new_dentries = false; + if (type == BTRFS_FT_DIR) + log_mode = LOG_INODE_ALL; +- ret = btrfs_log_inode(trans, BTRFS_I(di_inode), +- log_mode, ctx); +- btrfs_add_delayed_iput(BTRFS_I(di_inode)); ++ ret = btrfs_log_inode(trans, di_inode, log_mode, ctx); ++ btrfs_add_delayed_iput(di_inode); + if (ret) + goto out; + if (ctx->log_new_dentries) { +@@ -5516,14 +5584,13 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, + kfree(dir_elem); + + btrfs_add_delayed_iput(curr_inode); +- curr_inode = NULL; + +- vfs_inode = btrfs_iget_logging(ino, root); +- if (IS_ERR(vfs_inode)) { +- ret = PTR_ERR(vfs_inode); ++ curr_inode = btrfs_iget_logging(ino, root); ++ if (IS_ERR(curr_inode)) { ++ ret = PTR_ERR(curr_inode); ++ curr_inode = NULL; + break; + } +- curr_inode = BTRFS_I(vfs_inode); + } + out: + btrfs_free_path(path); +@@ -5601,7 +5668,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans, + struct btrfs_log_ctx *ctx) + { + struct btrfs_ino_list *ino_elem; +- struct inode *inode; ++ struct btrfs_inode *inode; + + /* + * It's rare to have a lot of conflicting inodes, in practice it is not +@@ -5692,12 +5759,12 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans, + * inode in LOG_INODE_EXISTS mode 
and rename operations update the log, + * so that the log ends up with the new name and without the old name. + */ +- if (!need_log_inode(trans, BTRFS_I(inode))) { +- btrfs_add_delayed_iput(BTRFS_I(inode)); ++ if (!need_log_inode(trans, inode)) { ++ btrfs_add_delayed_iput(inode); + return 0; + } + +- btrfs_add_delayed_iput(BTRFS_I(inode)); ++ btrfs_add_delayed_iput(inode); + + ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); + if (!ino_elem) +@@ -5733,7 +5800,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, + */ + while (!list_empty(&ctx->conflict_inodes)) { + struct btrfs_ino_list *curr; +- struct inode *inode; ++ struct btrfs_inode *inode; + u64 ino; + u64 parent; + +@@ -5769,9 +5836,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, + * dir index key range logged for the directory. So we + * must make sure the deletion is recorded. + */ +- ret = btrfs_log_inode(trans, BTRFS_I(inode), +- LOG_INODE_ALL, ctx); +- btrfs_add_delayed_iput(BTRFS_I(inode)); ++ ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx); ++ btrfs_add_delayed_iput(inode); + if (ret) + break; + continue; +@@ -5787,8 +5853,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, + * it again because if some other task logged the inode after + * that, we can avoid doing it again. + */ +- if (!need_log_inode(trans, BTRFS_I(inode))) { +- btrfs_add_delayed_iput(BTRFS_I(inode)); ++ if (!need_log_inode(trans, inode)) { ++ btrfs_add_delayed_iput(inode); + continue; + } + +@@ -5799,8 +5865,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, + * well because during a rename we pin the log and update the + * log with the new name before we unpin it. + */ +- ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx); +- btrfs_add_delayed_iput(BTRFS_I(inode)); ++ ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx); ++ btrfs_add_delayed_iput(inode); + if (ret) + break; + } +@@ -6292,7 +6358,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, + + list_for_each_entry(item, delayed_ins_list, log_list) { + struct btrfs_dir_item *dir_item; +- struct inode *di_inode; ++ struct btrfs_inode *di_inode; + struct btrfs_key key; + int log_mode = LOG_INODE_EXISTS; + +@@ -6308,8 +6374,8 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, + break; + } + +- if (!need_log_inode(trans, BTRFS_I(di_inode))) { +- btrfs_add_delayed_iput(BTRFS_I(di_inode)); ++ if (!need_log_inode(trans, di_inode)) { ++ btrfs_add_delayed_iput(di_inode); + continue; + } + +@@ -6317,12 +6383,12 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, + log_mode = LOG_INODE_ALL; + + ctx->log_new_dentries = false; +- ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx); ++ ret = btrfs_log_inode(trans, di_inode, log_mode, ctx); + + if (!ret && ctx->log_new_dentries) +- ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx); ++ ret = log_new_dir_dentries(trans, di_inode, ctx); + +- btrfs_add_delayed_iput(BTRFS_I(di_inode)); ++ btrfs_add_delayed_iput(di_inode); + + if (ret) + break; +@@ -6730,7 +6796,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, + ptr = btrfs_item_ptr_offset(leaf, slot); + while (cur_offset < item_size) { + struct btrfs_key inode_key; +- struct inode *dir_inode; ++ struct btrfs_inode *dir_inode; + + inode_key.type = BTRFS_INODE_ITEM_KEY; + inode_key.offset = 0; +@@ -6779,18 +6845,16 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, + goto out; + } 
+ +- if (!need_log_inode(trans, BTRFS_I(dir_inode))) { +- btrfs_add_delayed_iput(BTRFS_I(dir_inode)); ++ if (!need_log_inode(trans, dir_inode)) { ++ btrfs_add_delayed_iput(dir_inode); + continue; + } + + ctx->log_new_dentries = false; +- ret = btrfs_log_inode(trans, BTRFS_I(dir_inode), +- LOG_INODE_ALL, ctx); ++ ret = btrfs_log_inode(trans, dir_inode, LOG_INODE_ALL, ctx); + if (!ret && ctx->log_new_dentries) +- ret = log_new_dir_dentries(trans, +- BTRFS_I(dir_inode), ctx); +- btrfs_add_delayed_iput(BTRFS_I(dir_inode)); ++ ret = log_new_dir_dentries(trans, dir_inode, ctx); ++ btrfs_add_delayed_iput(dir_inode); + if (ret) + goto out; + } +@@ -6815,7 +6879,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, + struct extent_buffer *leaf; + int slot; + struct btrfs_key search_key; +- struct inode *inode; ++ struct btrfs_inode *inode; + u64 ino; + int ret = 0; + +@@ -6830,11 +6894,10 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans, + if (IS_ERR(inode)) + return PTR_ERR(inode); + +- if (BTRFS_I(inode)->generation >= trans->transid && +- need_log_inode(trans, BTRFS_I(inode))) +- ret = btrfs_log_inode(trans, BTRFS_I(inode), +- LOG_INODE_EXISTS, ctx); +- btrfs_add_delayed_iput(BTRFS_I(inode)); ++ if (inode->generation >= trans->transid && ++ need_log_inode(trans, inode)) ++ ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx); ++ btrfs_add_delayed_iput(inode); + if (ret) + return ret; + +diff --git a/fs/erofs/data.c b/fs/erofs/data.c +index 19ab9bb3a9a0e1..51250ac184a528 100644 +--- a/fs/erofs/data.c ++++ b/fs/erofs/data.c +@@ -358,6 +358,8 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + */ + static int erofs_read_folio(struct file *file, struct folio *folio) + { ++ trace_erofs_read_folio(folio, true); ++ + return iomap_read_folio(folio, &erofs_iomap_ops); + } + +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index cde5a15b129f63..31b32d9e7bbcea 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -772,7 +772,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) + call_rcu(&epi->rcu, epi_rcu_free); + + percpu_counter_dec(&ep->user->epoll_watches); +- return ep_refcount_dec_and_test(ep); ++ return true; + } + + /* +@@ -780,14 +780,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) + */ + static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi) + { +- WARN_ON_ONCE(__ep_remove(ep, epi, false)); ++ if (__ep_remove(ep, epi, false)) ++ WARN_ON_ONCE(ep_refcount_dec_and_test(ep)); + } + + static void ep_clear_and_put(struct eventpoll *ep) + { + struct rb_node *rbp, *next; + struct epitem *epi; +- bool dispose; + + /* We need to release all tasks waiting for these file */ + if (waitqueue_active(&ep->poll_wait)) +@@ -820,10 +820,8 @@ static void ep_clear_and_put(struct eventpoll *ep) + cond_resched(); + } + +- dispose = ep_refcount_dec_and_test(ep); + mutex_unlock(&ep->mtx); +- +- if (dispose) ++ if (ep_refcount_dec_and_test(ep)) + ep_free(ep); + } + +@@ -1003,7 +1001,7 @@ void eventpoll_release_file(struct file *file) + dispose = __ep_remove(ep, epi, true); + mutex_unlock(&ep->mtx); + +- if (dispose) ++ if (dispose && ep_refcount_dec_and_test(ep)) + ep_free(ep); + goto again; + } +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index 897c71077a0f7e..4b3ae7e0def32e 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -52,7 +52,7 @@ static void proc_evict_inode(struct inode *inode) + + head = ei->sysctl; + if (head) { +- RCU_INIT_POINTER(ei->sysctl, NULL); ++ 
WRITE_ONCE(ei->sysctl, NULL); + proc_sys_evict_inode(inode, head); + } + } +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index 071a71eb1a2d43..b7d6bc5c1ce9a9 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -920,17 +920,21 @@ static int proc_sys_compare(const struct dentry *dentry, + struct ctl_table_header *head; + struct inode *inode; + +- /* Although proc doesn't have negative dentries, rcu-walk means +- * that inode here can be NULL */ +- /* AV: can it, indeed? */ +- inode = d_inode_rcu(dentry); +- if (!inode) +- return 1; + if (name->len != len) + return 1; + if (memcmp(name->name, str, len)) + return 1; +- head = rcu_dereference(PROC_I(inode)->sysctl); ++ ++ // false positive is fine here - we'll recheck anyway ++ if (d_in_lookup(dentry)) ++ return 0; ++ ++ inode = d_inode_rcu(dentry); ++ // we just might have run into dentry in the middle of __dentry_kill() ++ if (!inode) ++ return 1; ++ ++ head = READ_ONCE(PROC_I(inode)->sysctl); + return !head || !sysctl_is_seen(head); + } + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index b8640f36ebf8ab..d0896bf8e12f0f 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -33,9 +33,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + unsigned long text, lib, swap, anon, file, shmem; + unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; + +- anon = get_mm_counter(mm, MM_ANONPAGES); +- file = get_mm_counter(mm, MM_FILEPAGES); +- shmem = get_mm_counter(mm, MM_SHMEMPAGES); ++ anon = get_mm_counter_sum(mm, MM_ANONPAGES); ++ file = get_mm_counter_sum(mm, MM_FILEPAGES); ++ shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES); + + /* + * Note: to minimize their overhead, mm maintains hiwater_vm and +@@ -56,7 +56,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + text = min(text, mm->exec_vm << PAGE_SHIFT); + lib = (mm->exec_vm << PAGE_SHIFT) - text; + +- swap = get_mm_counter(mm, MM_SWAPENTS); ++ swap = get_mm_counter_sum(mm, MM_SWAPENTS); + SEQ_PUT_DEC("VmPeak:\t", hiwater_vm); + SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); + SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); +@@ -89,12 +89,12 @@ unsigned long task_statm(struct mm_struct *mm, + unsigned long *shared, unsigned long *text, + unsigned long *data, unsigned long *resident) + { +- *shared = get_mm_counter(mm, MM_FILEPAGES) + +- get_mm_counter(mm, MM_SHMEMPAGES); ++ *shared = get_mm_counter_sum(mm, MM_FILEPAGES) + ++ get_mm_counter_sum(mm, MM_SHMEMPAGES); + *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) + >> PAGE_SHIFT; + *data = mm->data_vm + mm->stack_vm; +- *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); ++ *resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES); + return mm->total_vm; + } + +diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h +index 5c856adf7be9ec..c9b37f2ebde853 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -830,6 +830,7 @@ struct TCP_Server_Info { + * format: \\HOST\SHARE[\OPTIONAL PATH] + */ + char *leaf_fullpath; ++ bool dfs_conn:1; + }; + + static inline bool is_smb1(struct TCP_Server_Info *server) +@@ -1065,6 +1066,7 @@ struct cifs_ses { + struct list_head smb_ses_list; + struct list_head rlist; /* reconnect list */ + struct list_head tcon_list; ++ struct list_head dlist; /* dfs list */ + struct cifs_tcon *tcon_ipc; + spinlock_t ses_lock; /* protect anything here that is not protected */ + struct mutex session_mutex; +@@ -1294,6 +1296,7 @@ struct cifs_tcon { + /* BB add field for back pointer to sb struct(s)? 
*/ + #ifdef CONFIG_CIFS_DFS_UPCALL + struct delayed_work dfs_cache_work; ++ struct list_head dfs_ses_list; + #endif + struct delayed_work query_interfaces; /* query interfaces workqueue job */ + char *origin_fullpath; /* canonical copy of smb3_fs_context::source */ +diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h +index c6d325666b5cd8..5ab877e480abcb 100644 +--- a/fs/smb/client/cifsproto.h ++++ b/fs/smb/client/cifsproto.h +@@ -136,6 +136,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid, + struct smb_hdr *out_buf, + int *bytes_returned); + ++void smb2_query_server_interfaces(struct work_struct *work); + void + cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server, + bool all_channels); +@@ -737,15 +738,9 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options) + + int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry); + +-/* Put references of @ses and its children */ + static inline void cifs_put_smb_ses(struct cifs_ses *ses) + { +- struct cifs_ses *next; +- +- do { +- next = ses->dfs_root_ses; +- __cifs_put_smb_ses(ses); +- } while ((ses = next)); ++ __cifs_put_smb_ses(ses); + } + + /* Get an active reference of @ses and its children. +@@ -759,9 +754,7 @@ static inline void cifs_put_smb_ses(struct cifs_ses *ses) + static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses) + { + lockdep_assert_held(&cifs_tcp_ses_lock); +- +- for (; ses; ses = ses->dfs_root_ses) +- ses->ses_count++; ++ ses->ses_count++; + } + + static inline bool dfs_src_pathname_equal(const char *s1, const char *s2) +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index 8298d1745f9b9c..0588896c44567d 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -113,7 +113,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) + return rc; + } + +-static void smb2_query_server_interfaces(struct work_struct *work) ++void smb2_query_server_interfaces(struct work_struct *work) + { + int rc; + int xid; +@@ -1551,6 +1551,9 @@ static int match_server(struct TCP_Server_Info *server, + if (server->nosharesock) + return 0; + ++ if (!match_super && (ctx->dfs_conn || server->dfs_conn)) ++ return 0; ++ + /* If multidialect negotiation see if existing sessions match one */ + if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { + if (server->vals->protocol_id < SMB30_PROT_ID) +@@ -1740,6 +1743,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, + + if (ctx->nosharesock) + tcp_ses->nosharesock = true; ++ tcp_ses->dfs_conn = ctx->dfs_conn; + + tcp_ses->ops = ctx->ops; + tcp_ses->vals = ctx->vals; +@@ -1890,12 +1894,14 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, + } + + /* this function must be called with ses_lock and chan_lock held */ +-static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx) ++static int match_session(struct cifs_ses *ses, ++ struct smb3_fs_context *ctx, ++ bool match_super) + { + struct TCP_Server_Info *server = ses->server; + enum securityEnum ctx_sec, ses_sec; + +- if (ctx->dfs_root_ses != ses->dfs_root_ses) ++ if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses) + return 0; + + /* +@@ -2047,7 +2053,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) + continue; + } + spin_lock(&ses->chan_lock); +- if (match_session(ses, ctx)) { ++ if (match_session(ses, ctx, false)) { + spin_unlock(&ses->chan_lock); + spin_unlock(&ses->ses_lock); + ret = ses; +@@ -2450,8 +2456,6 @@ 
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) + * need to lock before changing something in the session. + */ + spin_lock(&cifs_tcp_ses_lock); +- if (ctx->dfs_root_ses) +- cifs_smb_ses_inc_refcount(ctx->dfs_root_ses); + ses->dfs_root_ses = ctx->dfs_root_ses; + list_add(&ses->smb_ses_list, &server->smb_ses_list); + spin_unlock(&cifs_tcp_ses_lock); +@@ -2528,6 +2532,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace) + { + unsigned int xid; + struct cifs_ses *ses; ++ LIST_HEAD(ses_list); + + /* + * IPC tcon share the lifetime of their session and are +@@ -2559,6 +2564,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace) + cancel_delayed_work_sync(&tcon->query_interfaces); + #ifdef CONFIG_CIFS_DFS_UPCALL + cancel_delayed_work_sync(&tcon->dfs_cache_work); ++ list_replace_init(&tcon->dfs_ses_list, &ses_list); + #endif + + if (tcon->use_witness) { +@@ -2579,6 +2585,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace) + cifs_fscache_release_super_cookie(tcon); + tconInfoFree(tcon, netfs_trace_tcon_ref_free); + cifs_put_smb_ses(ses); ++#ifdef CONFIG_CIFS_DFS_UPCALL ++ dfs_put_root_smb_sessions(&ses_list); ++#endif + } + + /** +@@ -2807,20 +2816,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) + tcon->max_cached_dirs = ctx->max_cached_dirs; + tcon->nodelete = ctx->nodelete; + tcon->local_lease = ctx->local_lease; +- INIT_LIST_HEAD(&tcon->pending_opens); + tcon->status = TID_GOOD; + +- INIT_DELAYED_WORK(&tcon->query_interfaces, +- smb2_query_server_interfaces); + if (ses->server->dialect >= SMB30_PROT_ID && + (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { + /* schedule query interfaces poll */ + queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, + (SMB_INTERFACE_POLL_INTERVAL * HZ)); + } +-#ifdef CONFIG_CIFS_DFS_UPCALL +- INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh); +-#endif + spin_lock(&cifs_tcp_ses_lock); + list_add(&tcon->tcon_list, &ses->tcon_list); + spin_unlock(&cifs_tcp_ses_lock); +@@ -2962,7 +2965,7 @@ cifs_match_super(struct super_block *sb, void *data) + spin_lock(&ses->chan_lock); + spin_lock(&tcon->tc_lock); + if (!match_server(tcp_srv, ctx, true) || +- !match_session(ses, ctx) || ++ !match_session(ses, ctx, true) || + !match_tcon(tcon, ctx) || + !match_prepath(sb, tcon, mnt_data)) { + rc = 0; +@@ -3712,13 +3715,12 @@ int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx) + int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) + { + struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; +- bool isdfs; + int rc; + +- rc = dfs_mount_share(&mnt_ctx, &isdfs); ++ rc = dfs_mount_share(&mnt_ctx); + if (rc) + goto error; +- if (!isdfs) ++ if (!ctx->dfs_conn) + goto out; + + /* +@@ -4135,7 +4137,7 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses) + } + + static struct cifs_tcon * +-__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) ++cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) + { + int rc; + struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); +@@ -4233,17 +4235,6 @@ __cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) + return tcon; + } + +-static struct cifs_tcon * +-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) +-{ +- struct cifs_tcon *ret; +- +- cifs_mount_lock(); +- ret = __cifs_construct_tcon(cifs_sb, fsuid); +- cifs_mount_unlock(); +- return ret; +-} +- + struct cifs_tcon * + 
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) + { +diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c +index bd259b04cdede0..c35953843373ea 100644 +--- a/fs/smb/client/dfs.c ++++ b/fs/smb/client/dfs.c +@@ -69,7 +69,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path) + * Get an active reference of @ses so that next call to cifs_put_tcon() won't + * release it as any new DFS referrals must go through its IPC tcon. + */ +-static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx) ++static void set_root_smb_session(struct cifs_mount_ctx *mnt_ctx) + { + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; + struct cifs_ses *ses = mnt_ctx->ses; +@@ -95,7 +95,7 @@ static inline int parse_dfs_target(struct smb3_fs_context *ctx, + return rc; + } + +-static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx, ++static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx, + struct dfs_info3_param *tgt, + struct dfs_ref_walk *rw) + { +@@ -120,6 +120,7 @@ static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx, + } + ref_walk_path(rw) = ref_path; + ref_walk_fpath(rw) = full_path; ++ ref_walk_ses(rw) = ctx->dfs_root_ses; + return 0; + } + +@@ -128,11 +129,11 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, + { + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; + struct dfs_info3_param tgt = {}; +- bool is_refsrv; + int rc = -ENOENT; + + again: + do { ++ ctx->dfs_root_ses = ref_walk_ses(rw); + if (ref_walk_empty(rw)) { + rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1, + NULL, ref_walk_tl(rw)); +@@ -158,10 +159,7 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, + if (rc) + continue; + +- is_refsrv = tgt.server_type == DFS_TYPE_ROOT || +- DFS_INTERLINK(tgt.flags); + ref_walk_set_tgt_hint(rw); +- + if (tgt.flags & DFSREF_STORAGE_SERVER) { + rc = cifs_mount_get_tcon(mnt_ctx); + if (!rc) +@@ -172,12 +170,10 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, + continue; + } + +- if (is_refsrv) +- add_root_smb_session(mnt_ctx); +- ++ set_root_smb_session(mnt_ctx); + rc = ref_walk_advance(rw); + if (!rc) { +- rc = set_ref_paths(mnt_ctx, &tgt, rw); ++ rc = setup_dfs_ref(mnt_ctx, &tgt, rw); + if (!rc) { + rc = -EREMOTE; + goto again; +@@ -193,20 +189,22 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, + return rc; + } + +-static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx) ++static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, ++ struct dfs_ref_walk **rw) + { +- struct dfs_ref_walk *rw; + int rc; + +- rw = ref_walk_alloc(); +- if (IS_ERR(rw)) +- return PTR_ERR(rw); ++ *rw = ref_walk_alloc(); ++ if (IS_ERR(*rw)) { ++ rc = PTR_ERR(*rw); ++ *rw = NULL; ++ return rc; ++ } + +- ref_walk_init(rw); +- rc = set_ref_paths(mnt_ctx, NULL, rw); ++ ref_walk_init(*rw); ++ rc = setup_dfs_ref(mnt_ctx, NULL, *rw); + if (!rc) +- rc = __dfs_referral_walk(mnt_ctx, rw); +- ref_walk_free(rw); ++ rc = __dfs_referral_walk(mnt_ctx, *rw); + return rc; + } + +@@ -214,16 +212,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) + { + struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; ++ struct dfs_ref_walk *rw = NULL; + struct cifs_tcon *tcon; + char *origin_fullpath; +- bool new_tcon = true; + int rc; + + origin_fullpath = dfs_get_path(cifs_sb, ctx->source); + if (IS_ERR(origin_fullpath)) + return PTR_ERR(origin_fullpath); + +- rc = dfs_referral_walk(mnt_ctx); ++ rc = dfs_referral_walk(mnt_ctx, &rw); + if (!rc) { + /* + * Prevent superblock from being created with 
any missing +@@ -241,21 +239,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) + + tcon = mnt_ctx->tcon; + spin_lock(&tcon->tc_lock); +- if (!tcon->origin_fullpath) { +- tcon->origin_fullpath = origin_fullpath; +- origin_fullpath = NULL; +- } else { +- new_tcon = false; +- } ++ tcon->origin_fullpath = origin_fullpath; ++ origin_fullpath = NULL; ++ ref_walk_set_tcon(rw, tcon); + spin_unlock(&tcon->tc_lock); +- +- if (new_tcon) { +- queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, +- dfs_cache_get_ttl() * HZ); +- } ++ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, ++ dfs_cache_get_ttl() * HZ); + + out: + kfree(origin_fullpath); ++ ref_walk_free(rw); + return rc; + } + +@@ -279,7 +272,7 @@ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx) + return rc; + } + +-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs) ++int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) + { + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; + bool nodfs = ctx->nodfs; +@@ -289,7 +282,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs) + if (rc) + return rc; + +- *isdfs = false; + rc = get_session(mnt_ctx, NULL); + if (rc) + return rc; +@@ -317,10 +309,15 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs) + return rc; + } + +- *isdfs = true; +- add_root_smb_session(mnt_ctx); +- rc = __dfs_mount_share(mnt_ctx); +- dfs_put_root_smb_sessions(mnt_ctx); ++ if (!ctx->dfs_conn) { ++ ctx->dfs_conn = true; ++ cifs_mount_put_conns(mnt_ctx); ++ rc = get_session(mnt_ctx, NULL); ++ } ++ if (!rc) { ++ set_root_smb_session(mnt_ctx); ++ rc = __dfs_mount_share(mnt_ctx); ++ } + return rc; + } + +diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h +index e5c4dcf837503a..1aa2bc65b3bc2c 100644 +--- a/fs/smb/client/dfs.h ++++ b/fs/smb/client/dfs.h +@@ -19,6 +19,7 @@ + struct dfs_ref { + char *path; + char *full_path; ++ struct cifs_ses *ses; + struct dfs_cache_tgt_list tl; + struct dfs_cache_tgt_iterator *tit; + }; +@@ -38,6 +39,7 @@ struct dfs_ref_walk { + #define ref_walk_path(w) (ref_walk_cur(w)->path) + #define ref_walk_fpath(w) (ref_walk_cur(w)->full_path) + #define ref_walk_tl(w) (&ref_walk_cur(w)->tl) ++#define ref_walk_ses(w) (ref_walk_cur(w)->ses) + + static inline struct dfs_ref_walk *ref_walk_alloc(void) + { +@@ -60,14 +62,19 @@ static inline void __ref_walk_free(struct dfs_ref *ref) + kfree(ref->path); + kfree(ref->full_path); + dfs_cache_free_tgts(&ref->tl); ++ if (ref->ses) ++ cifs_put_smb_ses(ref->ses); + memset(ref, 0, sizeof(*ref)); + } + + static inline void ref_walk_free(struct dfs_ref_walk *rw) + { +- struct dfs_ref *ref = ref_walk_start(rw); ++ struct dfs_ref *ref; + +- for (; ref <= ref_walk_end(rw); ref++) ++ if (!rw) ++ return; ++ ++ for (ref = ref_walk_start(rw); ref <= ref_walk_end(rw); ref++) + __ref_walk_free(ref); + kfree(rw); + } +@@ -116,9 +123,22 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw) + ref_walk_tit(rw)); + } + ++static inline void ref_walk_set_tcon(struct dfs_ref_walk *rw, ++ struct cifs_tcon *tcon) ++{ ++ struct dfs_ref *ref = ref_walk_start(rw); ++ ++ for (; ref <= ref_walk_cur(rw); ref++) { ++ if (WARN_ON_ONCE(!ref->ses)) ++ continue; ++ list_add(&ref->ses->dlist, &tcon->dfs_ses_list); ++ ref->ses = NULL; ++ } ++} ++ + int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref, + struct smb3_fs_context *ctx); +-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs); ++int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx); + + static inline char 
*dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path) + { +@@ -142,20 +162,14 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p + * references of all DFS root sessions that were used across the mount process + * in dfs_mount_share(). + */ +-static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx) ++static inline void dfs_put_root_smb_sessions(struct list_head *head) + { +- const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; +- struct cifs_ses *ses = ctx->dfs_root_ses; +- struct cifs_ses *cur; +- +- if (!ses) +- return; ++ struct cifs_ses *ses, *n; + +- for (cur = ses; cur; cur = cur->dfs_root_ses) { +- if (cur->dfs_root_ses) +- cifs_put_smb_ses(cur->dfs_root_ses); ++ list_for_each_entry_safe(ses, n, head, dlist) { ++ list_del_init(&ses->dlist); ++ cifs_put_smb_ses(ses); + } +- cifs_put_smb_ses(ses); + } + + #endif /* _CIFS_DFS_H */ +diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c +index 11c8efecf7aa12..433f546055b977 100644 +--- a/fs/smb/client/dfs_cache.c ++++ b/fs/smb/client/dfs_cache.c +@@ -1095,16 +1095,18 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, + return 0; + } + +-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2) ++static bool target_share_equal(struct cifs_tcon *tcon, const char *s1) + { +- char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0}; ++ struct TCP_Server_Info *server = tcon->ses->server; ++ struct sockaddr_storage ss; + const char *host; ++ const char *s2 = &tcon->tree_name[1]; + size_t hostlen; +- struct sockaddr_storage ss; ++ char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0}; + bool match; + int rc; + +- if (strcasecmp(s1, s2)) ++ if (strcasecmp(s2, s1)) + return false; + + /* +@@ -1128,34 +1130,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c + return match; + } + +-/* +- * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new +- * target shares in @refs. +- */ +-static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server, +- const char *path, +- struct dfs_cache_tgt_list *old_tl, +- struct dfs_cache_tgt_list *new_tl) +-{ +- struct dfs_cache_tgt_iterator *oit, *nit; +- +- for (oit = dfs_cache_get_tgt_iterator(old_tl); oit; +- oit = dfs_cache_get_next_tgt(old_tl, oit)) { +- for (nit = dfs_cache_get_tgt_iterator(new_tl); nit; +- nit = dfs_cache_get_next_tgt(new_tl, nit)) { +- if (target_share_equal(server, +- dfs_cache_get_tgt_name(oit), +- dfs_cache_get_tgt_name(nit))) { +- dfs_cache_noreq_update_tgthint(path, nit); +- return; +- } +- } +- } +- +- cifs_dbg(FYI, "%s: no cached or matched targets. 
mark dfs share for reconnect.\n", __func__); +- cifs_signal_cifsd_for_reconnect(server, true); +-} +- + static bool is_ses_good(struct cifs_ses *ses) + { + struct TCP_Server_Info *server = ses->server; +@@ -1172,41 +1146,35 @@ static bool is_ses_good(struct cifs_ses *ses) + return ret; + } + +-/* Refresh dfs referral of @ses and mark it for reconnect if needed */ +-static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh) ++static char *get_ses_refpath(struct cifs_ses *ses) + { + struct TCP_Server_Info *server = ses->server; +- DFS_CACHE_TGT_LIST(old_tl); +- DFS_CACHE_TGT_LIST(new_tl); +- bool needs_refresh = false; +- struct cache_entry *ce; +- unsigned int xid; +- char *path = NULL; +- int rc = 0; +- +- xid = get_xid(); ++ char *path = ERR_PTR(-ENOENT); + + mutex_lock(&server->refpath_lock); + if (server->leaf_fullpath) { + path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC); + if (!path) +- rc = -ENOMEM; ++ path = ERR_PTR(-ENOMEM); + } + mutex_unlock(&server->refpath_lock); +- if (!path) +- goto out; ++ return path; ++} + +- down_read(&htable_rw_lock); +- ce = lookup_cache_entry(path); +- needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce); +- if (!IS_ERR(ce)) { +- rc = get_targets(ce, &old_tl); +- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc); +- } +- up_read(&htable_rw_lock); ++/* Refresh dfs referral of @ses */ ++static void refresh_ses_referral(struct cifs_ses *ses) ++{ ++ struct cache_entry *ce; ++ unsigned int xid; ++ char *path; ++ int rc = 0; + +- if (!needs_refresh) { +- rc = 0; ++ xid = get_xid(); ++ ++ path = get_ses_refpath(ses); ++ if (IS_ERR(path)) { ++ rc = PTR_ERR(path); ++ path = NULL; + goto out; + } + +@@ -1217,29 +1185,106 @@ static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh) + goto out; + } + +- ce = cache_refresh_path(xid, ses, path, true); +- if (!IS_ERR(ce)) { +- rc = get_targets(ce, &new_tl); ++ ce = cache_refresh_path(xid, ses, path, false); ++ if (!IS_ERR(ce)) + up_read(&htable_rw_lock); +- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc); +- mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl); +- } ++ else ++ rc = PTR_ERR(ce); + + out: + free_xid(xid); +- dfs_cache_free_tgts(&old_tl); +- dfs_cache_free_tgts(&new_tl); + kfree(path); + } + +-static inline void refresh_ses_referral(struct cifs_ses *ses) ++static int __refresh_tcon_referral(struct cifs_tcon *tcon, ++ const char *path, ++ struct dfs_info3_param *refs, ++ int numrefs, bool force_refresh) + { +- __refresh_ses_referral(ses, false); ++ struct cache_entry *ce; ++ bool reconnect = force_refresh; ++ int rc = 0; ++ int i; ++ ++ if (unlikely(!numrefs)) ++ return 0; ++ ++ if (force_refresh) { ++ for (i = 0; i < numrefs; i++) { ++ /* TODO: include prefix paths in the matching */ ++ if (target_share_equal(tcon, refs[i].node_name)) { ++ reconnect = false; ++ break; ++ } ++ } ++ } ++ ++ down_write(&htable_rw_lock); ++ ce = lookup_cache_entry(path); ++ if (!IS_ERR(ce)) { ++ if (force_refresh || cache_entry_expired(ce)) ++ rc = update_cache_entry_locked(ce, refs, numrefs); ++ } else if (PTR_ERR(ce) == -ENOENT) { ++ ce = add_cache_entry_locked(refs, numrefs); ++ } ++ up_write(&htable_rw_lock); ++ ++ if (IS_ERR(ce)) ++ rc = PTR_ERR(ce); ++ if (reconnect) { ++ cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__); ++ cifs_signal_cifsd_for_reconnect(tcon->ses->server, true); ++ } ++ return rc; + } + +-static inline void force_refresh_ses_referral(struct cifs_ses *ses) ++static void refresh_tcon_referral(struct cifs_tcon *tcon, 
bool force_refresh) + { +- __refresh_ses_referral(ses, true); ++ struct dfs_info3_param *refs = NULL; ++ struct cache_entry *ce; ++ struct cifs_ses *ses; ++ unsigned int xid; ++ bool needs_refresh; ++ char *path; ++ int numrefs = 0; ++ int rc = 0; ++ ++ xid = get_xid(); ++ ses = tcon->ses; ++ ++ path = get_ses_refpath(ses); ++ if (IS_ERR(path)) { ++ rc = PTR_ERR(path); ++ path = NULL; ++ goto out; ++ } ++ ++ down_read(&htable_rw_lock); ++ ce = lookup_cache_entry(path); ++ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce); ++ if (!needs_refresh) { ++ up_read(&htable_rw_lock); ++ goto out; ++ } ++ up_read(&htable_rw_lock); ++ ++ ses = CIFS_DFS_ROOT_SES(ses); ++ if (!is_ses_good(ses)) { ++ cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", ++ __func__); ++ goto out; ++ } ++ ++ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); ++ if (!rc) { ++ rc = __refresh_tcon_referral(tcon, path, refs, ++ numrefs, force_refresh); ++ } ++ ++out: ++ free_xid(xid); ++ kfree(path); ++ free_dfs_info_array(refs, numrefs); + } + + /** +@@ -1280,7 +1325,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb) + */ + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; + +- force_refresh_ses_referral(tcon->ses); ++ refresh_tcon_referral(tcon, true); + return 0; + } + +@@ -1292,8 +1337,9 @@ void dfs_cache_refresh(struct work_struct *work) + + tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work); + +- for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) ++ list_for_each_entry(ses, &tcon->dfs_ses_list, dlist) + refresh_ses_referral(ses); ++ refresh_tcon_referral(tcon, false); + + queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, + atomic_read(&dfs_cache_ttl) * HZ); +diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h +index d0a2043ea44682..52ee72e562f5f6 100644 +--- a/fs/smb/client/fs_context.h ++++ b/fs/smb/client/fs_context.h +@@ -287,6 +287,7 @@ struct smb3_fs_context { + struct cifs_ses *dfs_root_ses; + bool dfs_automount:1; /* set for dfs automount only */ + enum cifs_reparse_type reparse_type; ++ bool dfs_conn:1; /* set for dfs mounts */ + }; + + extern const struct fs_parameter_spec smb3_fs_parameters[]; +diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c +index 9e8e0a01ae8eb0..bbbe48447765de 100644 +--- a/fs/smb/client/misc.c ++++ b/fs/smb/client/misc.c +@@ -145,6 +145,15 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace) + mutex_init(&ret_buf->fscache_lock); + #endif + trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace); ++#ifdef CONFIG_CIFS_DFS_UPCALL ++ INIT_LIST_HEAD(&ret_buf->dfs_ses_list); ++#endif ++ INIT_LIST_HEAD(&ret_buf->pending_opens); ++ INIT_DELAYED_WORK(&ret_buf->query_interfaces, ++ smb2_query_server_interfaces); ++#ifdef CONFIG_CIFS_DFS_UPCALL ++ INIT_DELAYED_WORK(&ret_buf->dfs_cache_work, dfs_cache_refresh); ++#endif + + return ret_buf; + } +diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c +index ec58c0e507244b..a6655807c0865a 100644 +--- a/fs/smb/client/namespace.c ++++ b/fs/smb/client/namespace.c +@@ -260,7 +260,7 @@ static struct vfsmount *cifs_do_automount(struct path *path) + ctx->source = NULL; + goto out; + } +- ctx->dfs_automount = is_dfs_mount(mntpt); ++ ctx->dfs_automount = ctx->dfs_conn = is_dfs_mount(mntpt); + cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n", + __func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount); + +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 
6c22240368abf4..e25c2ca56461ac 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -8503,11 +8503,6 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) + goto err_out; + } + +- opinfo->op_state = OPLOCK_STATE_NONE; +- wake_up_interruptible_all(&opinfo->oplock_q); +- opinfo_put(opinfo); +- ksmbd_fd_put(work, fp); +- + rsp->StructureSize = cpu_to_le16(24); + rsp->OplockLevel = rsp_oplevel; + rsp->Reserved = 0; +@@ -8515,16 +8510,15 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) + rsp->VolatileFid = volatile_id; + rsp->PersistentFid = persistent_id; + ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break)); +- if (!ret) +- return; +- ++ if (ret) { + err_out: ++ smb2_set_err_rsp(work); ++ } ++ + opinfo->op_state = OPLOCK_STATE_NONE; + wake_up_interruptible_all(&opinfo->oplock_q); +- + opinfo_put(opinfo); + ksmbd_fd_put(work, fp); +- smb2_set_err_rsp(work); + } + + static int check_lease_state(struct lease *lease, __le32 req_state) +@@ -8654,11 +8648,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work) + } + + lease_state = lease->state; +- opinfo->op_state = OPLOCK_STATE_NONE; +- wake_up_interruptible_all(&opinfo->oplock_q); +- atomic_dec(&opinfo->breaking_cnt); +- wake_up_interruptible_all(&opinfo->oplock_brk); +- opinfo_put(opinfo); + + rsp->StructureSize = cpu_to_le16(36); + rsp->Reserved = 0; +@@ -8667,16 +8656,16 @@ static void smb21_lease_break_ack(struct ksmbd_work *work) + rsp->LeaseState = lease_state; + rsp->LeaseDuration = 0; + ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack)); +- if (!ret) +- return; +- ++ if (ret) { + err_out: ++ smb2_set_err_rsp(work); ++ } ++ ++ opinfo->op_state = OPLOCK_STATE_NONE; + wake_up_interruptible_all(&opinfo->oplock_q); + atomic_dec(&opinfo->breaking_cnt); + wake_up_interruptible_all(&opinfo->oplock_brk); +- + opinfo_put(opinfo); +- smb2_set_err_rsp(work); + } + + /** +diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c +index 8faa25c6e129b5..7b6639949c250c 100644 +--- a/fs/smb/server/transport_rdma.c ++++ b/fs/smb/server/transport_rdma.c +@@ -426,7 +426,8 @@ static void free_transport(struct smb_direct_transport *t) + if (t->qp) { + ib_drain_qp(t->qp); + ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); +- ib_destroy_qp(t->qp); ++ t->qp = NULL; ++ rdma_destroy_qp(t->cm_id); + } + + ksmbd_debug(RDMA, "drain the reassembly queue\n"); +@@ -1934,8 +1935,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t, + return 0; + err: + if (t->qp) { +- ib_destroy_qp(t->qp); + t->qp = NULL; ++ rdma_destroy_qp(t->cm_id); + } + if (t->recv_cq) { + ib_destroy_cq(t->recv_cq); +diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c +index 3bbf2382706056..de813700f3d4f6 100644 +--- a/fs/smb/server/vfs.c ++++ b/fs/smb/server/vfs.c +@@ -1293,6 +1293,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, + + err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry); + if (err) { ++ mnt_drop_write(parent_path->mnt); + path_put(path); + path_put(parent_path); + } +diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h +index cc61f6a2b2ad6e..0cb5edd9c46711 100644 +--- a/include/drm/drm_file.h ++++ b/include/drm/drm_file.h +@@ -302,6 +302,9 @@ struct drm_file { + * + * Mapping of mm object handles to object pointers. Used by the GEM + * subsystem. Protected by @table_lock. ++ * ++ * Note that allocated entries might be NULL as a transient state when ++ * creating or deleting a handle. 
+ */ + struct idr object_idr; + +diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h +index 0dcc07b6865484..990259873e1e30 100644 +--- a/include/drm/drm_framebuffer.h ++++ b/include/drm/drm_framebuffer.h +@@ -23,6 +23,7 @@ + #ifndef __DRM_FRAMEBUFFER_H__ + #define __DRM_FRAMEBUFFER_H__ + ++#include + #include + #include + #include +@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs { + unsigned num_clips); + }; + ++#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i)) ++ + /** + * struct drm_framebuffer - frame buffer object + * +@@ -188,6 +191,10 @@ struct drm_framebuffer { + * DRM_MODE_FB_MODIFIERS. + */ + int flags; ++ /** ++ * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF. ++ */ ++ unsigned int internal_flags; + /** + * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor + * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR +diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h +index 125f096c88cb96..ee9df8cc67b730 100644 +--- a/include/drm/spsc_queue.h ++++ b/include/drm/spsc_queue.h +@@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n + + preempt_disable(); + ++ atomic_inc(&queue->job_count); ++ smp_mb__after_atomic(); ++ + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); +- atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer +diff --git a/include/linux/math.h b/include/linux/math.h +index dd4152711de7d5..ee754ec3dc929f 100644 +--- a/include/linux/math.h ++++ b/include/linux/math.h +@@ -34,6 +34,18 @@ + */ + #define round_down(x, y) ((x) & ~__round_mask(x, y)) + ++/** ++ * DIV_ROUND_UP_POW2 - divide and round up ++ * @n: numerator ++ * @d: denominator (must be a power of 2) ++ * ++ * Divides @n by @d and rounds up to next multiple of @d (which must be a power ++ * of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP(). ++ * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP(). 
++ */ ++#define DIV_ROUND_UP_POW2(n, d) \ ++ ((n) / (d) + !!((n) & ((d) - 1))) ++ + #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP + + #define DIV_ROUND_DOWN_ULL(ll, d) \ +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 209370f6443666..ee26e37daa0a80 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -2551,6 +2551,11 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) + return percpu_counter_read_positive(&mm->rss_stat[member]); + } + ++static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member) ++{ ++ return percpu_counter_sum_positive(&mm->rss_stat[member]); ++} ++ + void mm_trace_rss_stat(struct mm_struct *mm, int member); + + static inline void add_mm_counter(struct mm_struct *mm, int member, long value) +diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h +index f8b09a82f62e1d..1820b87b8b7fff 100644 +--- a/include/net/af_vsock.h ++++ b/include/net/af_vsock.h +@@ -236,8 +236,8 @@ int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, + int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags); + +-#ifdef CONFIG_BPF_SYSCALL + extern struct proto vsock_proto; ++#ifdef CONFIG_BPF_SYSCALL + int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); + void __init vsock_bpf_build_proto(void); + #else +diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h +index df7775afb92b93..0097791e1eede6 100644 +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -353,7 +353,7 @@ static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb) + + static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto) + { +- if (!pskb_may_pull(skb, PPPOE_SES_HLEN)) ++ if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN)) + return false; + + *inner_proto = __nf_flow_pppoe_proto(skb); +diff --git a/io_uring/opdef.c b/io_uring/opdef.c +index 3b9c6489b8b6d2..2d0a7db940fdb0 100644 +--- a/io_uring/opdef.c ++++ b/io_uring/opdef.c +@@ -202,6 +202,7 @@ const struct io_issue_def io_issue_defs[] = { + }, + [IORING_OP_FALLOCATE] = { + .needs_file = 1, ++ .hash_reg_file = 1, + .prep = io_fallocate_prep, + .issue = io_fallocate, + }, +diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c +index 3dabdd137d1021..2d6e1c98d8adc3 100644 +--- a/kernel/bpf/bpf_lru_list.c ++++ b/kernel/bpf/bpf_lru_list.c +@@ -337,12 +337,12 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru, + list) { + __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l), + BPF_LRU_LOCAL_LIST_T_FREE); +- if (++nfree == LOCAL_FREE_TARGET) ++ if (++nfree == lru->target_free) + break; + } + +- if (nfree < LOCAL_FREE_TARGET) +- __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, ++ if (nfree < lru->target_free) ++ __bpf_lru_list_shrink(lru, l, lru->target_free - nfree, + local_free_list(loc_l), + BPF_LRU_LOCAL_LIST_T_FREE); + +@@ -577,6 +577,9 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, + list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); + buf += elem_size; + } ++ ++ lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2, ++ 1, LOCAL_FREE_TARGET); + } + + static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, +diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h +index cbd8d3720c2bbe..fe2661a58ea94a 100644 +--- a/kernel/bpf/bpf_lru_list.h ++++ b/kernel/bpf/bpf_lru_list.h +@@ -58,6 +58,7 @@ struct bpf_lru { + del_from_htab_func 
del_from_htab; + void *del_arg; + unsigned int hash_offset; ++ unsigned int target_free; + unsigned int nr_scans; + bool percpu; + }; +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 873b17545717cb..3a33d9c1b1b2b4 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -873,8 +873,6 @@ static void perf_cgroup_switch(struct task_struct *task) + if (READ_ONCE(cpuctx->cgrp) == NULL) + return; + +- WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); +- + cgrp = perf_cgroup_from_task(task, NULL); + if (READ_ONCE(cpuctx->cgrp) == cgrp) + return; +@@ -886,6 +884,8 @@ static void perf_cgroup_switch(struct task_struct *task) + if (READ_ONCE(cpuctx->cgrp) == NULL) + return; + ++ WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); ++ + perf_ctx_disable(&cpuctx->ctx, true); + + ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP); +@@ -10473,7 +10473,7 @@ static int perf_uprobe_event_init(struct perf_event *event) + if (event->attr.type != perf_uprobe.type) + return -ENOENT; + +- if (!perfmon_capable()) ++ if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* +diff --git a/kernel/rseq.c b/kernel/rseq.c +index 9de6e35fe67914..23894ba8250cf9 100644 +--- a/kernel/rseq.c ++++ b/kernel/rseq.c +@@ -149,6 +149,29 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t) + return 0; + } + ++/* ++ * Get the user-space pointer value stored in the 'rseq_cs' field. ++ */ ++static int rseq_get_rseq_cs_ptr_val(struct rseq __user *rseq, u64 *rseq_cs) ++{ ++ if (!rseq_cs) ++ return -EFAULT; ++ ++#ifdef CONFIG_64BIT ++ if (get_user(*rseq_cs, &rseq->rseq_cs)) ++ return -EFAULT; ++#else ++ if (copy_from_user(rseq_cs, &rseq->rseq_cs, sizeof(*rseq_cs))) ++ return -EFAULT; ++#endif ++ ++ return 0; ++} ++ ++/* ++ * If the rseq_cs field of 'struct rseq' contains a valid pointer to ++ * user-space, copy 'struct rseq_cs' from user-space and validate its fields. ++ */ + static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) + { + struct rseq_cs __user *urseq_cs; +@@ -157,17 +180,16 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) + u32 sig; + int ret; + +-#ifdef CONFIG_64BIT +- if (get_user(ptr, &t->rseq->rseq_cs)) +- return -EFAULT; +-#else +- if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr))) +- return -EFAULT; +-#endif ++ ret = rseq_get_rseq_cs_ptr_val(t->rseq, &ptr); ++ if (ret) ++ return ret; ++ ++ /* If the rseq_cs pointer is NULL, return a cleared struct rseq_cs. */ + if (!ptr) { + memset(rseq_cs, 0, sizeof(*rseq_cs)); + return 0; + } ++ /* Check that the pointer value fits in the user-space process space. */ + if (ptr >= TASK_SIZE) + return -EINVAL; + urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr; +@@ -243,7 +265,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags) + return !!event_mask; + } + +-static int clear_rseq_cs(struct task_struct *t) ++static int clear_rseq_cs(struct rseq __user *rseq) + { + /* + * The rseq_cs field is set to NULL on preemption or signal +@@ -254,9 +276,9 @@ static int clear_rseq_cs(struct task_struct *t) + * Set rseq_cs to NULL. + */ + #ifdef CONFIG_64BIT +- return put_user(0UL, &t->rseq->rseq_cs); ++ return put_user(0UL, &rseq->rseq_cs); + #else +- if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs))) ++ if (clear_user(&rseq->rseq_cs, sizeof(rseq->rseq_cs))) + return -EFAULT; + return 0; + #endif +@@ -288,11 +310,11 @@ static int rseq_ip_fixup(struct pt_regs *regs) + * Clear the rseq_cs pointer and return. 
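For context, a compact sketch of the CONFIG_64BIT split used by the new rseq_get_rseq_cs_ptr_val() and by clear_rseq_cs(): a single get_user()/put_user() of a 64-bit field is not supported on all 32-bit architectures, so the slower copy routines are used there. The helper name below is invented for illustration:

static int read_user_u64(u64 __user *uptr, u64 *val)
{
#ifdef CONFIG_64BIT
	return get_user(*val, uptr);	/* 8-byte get_user is fine on 64-bit */
#else
	if (copy_from_user(val, uptr, sizeof(*val)))
		return -EFAULT;
	return 0;
#endif
}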
+ */ + if (!in_rseq_cs(ip, &rseq_cs)) +- return clear_rseq_cs(t); ++ return clear_rseq_cs(t->rseq); + ret = rseq_need_restart(t, rseq_cs.flags); + if (ret <= 0) + return ret; +- ret = clear_rseq_cs(t); ++ ret = clear_rseq_cs(t->rseq); + if (ret) + return ret; + trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset, +@@ -366,6 +388,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, + int, flags, u32, sig) + { + int ret; ++ u64 rseq_cs; + + if (flags & RSEQ_FLAG_UNREGISTER) { + if (flags & ~RSEQ_FLAG_UNREGISTER) +@@ -420,6 +443,19 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, + return -EINVAL; + if (!access_ok(rseq, rseq_len)) + return -EFAULT; ++ ++ /* ++ * If the rseq_cs pointer is non-NULL on registration, clear it to ++ * avoid a potential segfault on return to user-space. The proper thing ++ * to do would have been to fail the registration but this would break ++ * older libcs that reuse the rseq area for new threads without ++ * clearing the fields. ++ */ ++ if (rseq_get_rseq_cs_ptr_val(rseq, &rseq_cs)) ++ return -EFAULT; ++ if (rseq_cs && clear_rseq_cs(rseq)) ++ return -EFAULT; ++ + current->rseq = rseq; + current->rseq_len = rseq_len; + current->rseq_sig = sig; +diff --git a/lib/maple_tree.c b/lib/maple_tree.c +index a4a2592413b1b6..6f7a2c9cf922a2 100644 +--- a/lib/maple_tree.c ++++ b/lib/maple_tree.c +@@ -5270,6 +5270,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, + struct maple_enode *start; + + if (mte_is_leaf(enode)) { ++ mte_set_node_dead(enode); + node->type = mte_node_type(enode); + goto free_leaf; + } +@@ -5497,7 +5498,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) + /* At this point, we are at the leaf node that needs to be altered. */ + /* Exact fit, no nodes needed. 
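For context, a sketch of the caller-side pattern mas_preallocate() serves, with hypothetical tree, lock, and entry names; the MA_STATE_PREALLOC flag handling changed in this hunk is what lets the later store run without allocating while a spinlock is held:

MA_STATE(mas, &tree, index, last);

if (mas_preallocate(&mas, entry, GFP_KERNEL))	/* may sleep */
	return -ENOMEM;

spin_lock(&lock);
mas_store_prealloc(&mas, entry);	/* consumes the preallocated nodes */
spin_unlock(&lock);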
*/ + if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last) +- return 0; ++ goto set_flag; + + mas_wr_end_piv(&wr_mas); + node_size = mas_wr_new_end(&wr_mas); +@@ -5506,10 +5507,10 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) + if (node_size == wr_mas.node_end) { + /* reuse node */ + if (!mt_in_rcu(mas->tree)) +- return 0; ++ goto set_flag; + /* shifting boundary */ + if (wr_mas.offset_end - mas->offset == 1) +- return 0; ++ goto set_flag; + } + + if (node_size >= mt_slots[wr_mas.type]) { +@@ -5528,10 +5529,13 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) + + /* node store, slot store needs one node */ + ask_now: ++ mas->mas_flags &= ~MA_STATE_PREALLOC; + mas_node_count_gfp(mas, request, gfp); +- mas->mas_flags |= MA_STATE_PREALLOC; +- if (likely(!mas_is_err(mas))) ++ if (likely(!mas_is_err(mas))) { ++set_flag: ++ mas->mas_flags |= MA_STATE_PREALLOC; + return 0; ++ } + + mas_set_alloc_req(mas, 0); + ret = xa_err(mas->node); +diff --git a/mm/kasan/report.c b/mm/kasan/report.c +index 465e6a53b3bf25..44636fa953a723 100644 +--- a/mm/kasan/report.c ++++ b/mm/kasan/report.c +@@ -385,17 +385,8 @@ static void print_address_description(void *addr, u8 tag, + } + + if (is_vmalloc_addr(addr)) { +- struct vm_struct *va = find_vm_area(addr); +- +- if (va) { +- pr_err("The buggy address belongs to the virtual mapping at\n" +- " [%px, %px) created by:\n" +- " %pS\n", +- va->addr, va->addr + va->size, va->caller); +- pr_err("\n"); +- +- page = vmalloc_to_page(addr); +- } ++ pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr); ++ page = vmalloc_to_page(addr); + } + + if (page) { +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index fb947787f25da2..7eb92bcdbd8c16 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -459,6 +459,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, pgprot_t prot, struct page **pages, int *nr, + pgtbl_mod_mask *mask) + { ++ int err = 0; + pte_t *pte; + + /* +@@ -472,18 +473,25 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, + do { + struct page *page = pages[*nr]; + +- if (WARN_ON(!pte_none(ptep_get(pte)))) +- return -EBUSY; +- if (WARN_ON(!page)) +- return -ENOMEM; +- if (WARN_ON(!pfn_valid(page_to_pfn(page)))) +- return -EINVAL; ++ if (WARN_ON(!pte_none(ptep_get(pte)))) { ++ err = -EBUSY; ++ break; ++ } ++ if (WARN_ON(!page)) { ++ err = -ENOMEM; ++ break; ++ } ++ if (WARN_ON(!pfn_valid(page_to_pfn(page)))) { ++ err = -EINVAL; ++ break; ++ } + + set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); + (*nr)++; + } while (pte++, addr += PAGE_SIZE, addr != end); + *mask |= PGTBL_PTE_MODIFIED; +- return 0; ++ ++ return err; + } + + static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, +diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c +index b070a89912000a..febb1617e1a6a7 100644 +--- a/net/appletalk/ddp.c ++++ b/net/appletalk/ddp.c +@@ -563,6 +563,7 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint) + + /* Fill in the routing entry */ + rt->target = ta->sat_addr; ++ dev_put(rt->dev); /* Release old device */ + dev_hold(devhint); + rt->dev = devhint; + rt->flags = r->rt_flags; +diff --git a/net/atm/clip.c b/net/atm/clip.c +index 511467bb7fe40d..53d62361ae4606 100644 +--- a/net/atm/clip.c ++++ b/net/atm/clip.c +@@ -45,7 +45,8 @@ + #include + + static struct net_device *clip_devs; +-static struct atm_vcc *atmarpd; ++static struct atm_vcc __rcu *atmarpd; ++static DEFINE_MUTEX(atmarpd_lock); + static struct timer_list 
idle_timer; + static const struct neigh_ops clip_neigh_ops; + +@@ -53,24 +54,35 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) + { + struct sock *sk; + struct atmarp_ctrl *ctrl; ++ struct atm_vcc *vcc; + struct sk_buff *skb; ++ int err = 0; + + pr_debug("(%d)\n", type); +- if (!atmarpd) +- return -EUNATCH; ++ ++ rcu_read_lock(); ++ vcc = rcu_dereference(atmarpd); ++ if (!vcc) { ++ err = -EUNATCH; ++ goto unlock; ++ } + skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC); +- if (!skb) +- return -ENOMEM; ++ if (!skb) { ++ err = -ENOMEM; ++ goto unlock; ++ } + ctrl = skb_put(skb, sizeof(struct atmarp_ctrl)); + ctrl->type = type; + ctrl->itf_num = itf; + ctrl->ip = ip; +- atm_force_charge(atmarpd, skb->truesize); ++ atm_force_charge(vcc, skb->truesize); + +- sk = sk_atm(atmarpd); ++ sk = sk_atm(vcc); + skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk); +- return 0; ++unlock: ++ rcu_read_unlock(); ++ return err; + } + + static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) +@@ -417,6 +429,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) + + if (!vcc->push) + return -EBADFD; ++ if (vcc->user_back) ++ return -EINVAL; + clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); + if (!clip_vcc) + return -ENOMEM; +@@ -607,17 +621,27 @@ static void atmarpd_close(struct atm_vcc *vcc) + { + pr_debug("\n"); + +- rtnl_lock(); +- atmarpd = NULL; ++ mutex_lock(&atmarpd_lock); ++ RCU_INIT_POINTER(atmarpd, NULL); ++ mutex_unlock(&atmarpd_lock); ++ ++ synchronize_rcu(); + skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); +- rtnl_unlock(); + + pr_debug("(done)\n"); + module_put(THIS_MODULE); + } + ++static int atmarpd_send(struct atm_vcc *vcc, struct sk_buff *skb) ++{ ++ atm_return_tx(vcc, skb); ++ dev_kfree_skb_any(skb); ++ return 0; ++} ++ + static const struct atmdev_ops atmarpd_dev_ops = { +- .close = atmarpd_close ++ .close = atmarpd_close, ++ .send = atmarpd_send + }; + + +@@ -631,15 +655,18 @@ static struct atm_dev atmarpd_dev = { + + static int atm_init_atmarp(struct atm_vcc *vcc) + { +- rtnl_lock(); ++ if (vcc->push == clip_push) ++ return -EINVAL; ++ ++ mutex_lock(&atmarpd_lock); + if (atmarpd) { +- rtnl_unlock(); ++ mutex_unlock(&atmarpd_lock); + return -EADDRINUSE; + } + + mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ); + +- atmarpd = vcc; ++ rcu_assign_pointer(atmarpd, vcc); + set_bit(ATM_VF_META, &vcc->flags); + set_bit(ATM_VF_READY, &vcc->flags); + /* allow replies and avoid getting closed if signaling dies */ +@@ -648,13 +675,14 @@ static int atm_init_atmarp(struct atm_vcc *vcc) + vcc->push = NULL; + vcc->pop = NULL; /* crash */ + vcc->push_oam = NULL; /* crash */ +- rtnl_unlock(); ++ mutex_unlock(&atmarpd_lock); + return 0; + } + + static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + { + struct atm_vcc *vcc = ATM_SD(sock); ++ struct sock *sk = sock->sk; + int err = 0; + + switch (cmd) { +@@ -675,14 +703,18 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) + err = clip_create(arg); + break; + case ATMARPD_CTRL: ++ lock_sock(sk); + err = atm_init_atmarp(vcc); + if (!err) { + sock->state = SS_CONNECTED; + __module_get(THIS_MODULE); + } ++ release_sock(sk); + break; + case ATMARP_MKIP: ++ lock_sock(sk); + err = clip_mkip(vcc, arg); ++ release_sock(sk); + break; + case ATMARP_SETENTRY: + err = clip_setentry(vcc, (__force __be32)arg); +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 4029330e29a998..8516ba62c54559 100644 
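The clip.c hunks above replace RTNL protection of atmarpd with RCU plus a registration mutex; a minimal sketch of that publish/read/retire pattern (struct foo and use() are placeholders):

static struct foo __rcu *global_ptr;
static DEFINE_MUTEX(reg_lock);

static int reader(void)
{
	struct foo *p;
	int err = 0;

	rcu_read_lock();
	p = rcu_dereference(global_ptr);
	if (p)
		use(p);			/* must not sleep in the RCU section */
	else
		err = -EUNATCH;
	rcu_read_unlock();
	return err;
}

static void unregister(void)
{
	mutex_lock(&reg_lock);
	RCU_INIT_POINTER(global_ptr, NULL);
	mutex_unlock(&reg_lock);
	synchronize_rcu();		/* in-flight readers done; safe to tear down */
}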
+--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -2139,40 +2139,6 @@ static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, + return rp->status; + } + +-static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, +- struct sk_buff *skb) +-{ +- struct hci_rp_le_set_ext_adv_params *rp = data; +- struct hci_cp_le_set_ext_adv_params *cp; +- struct adv_info *adv_instance; +- +- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); +- +- if (rp->status) +- return rp->status; +- +- cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); +- if (!cp) +- return rp->status; +- +- hci_dev_lock(hdev); +- hdev->adv_addr_type = cp->own_addr_type; +- if (!cp->handle) { +- /* Store in hdev for instance 0 */ +- hdev->adv_tx_power = rp->tx_power; +- } else { +- adv_instance = hci_find_adv_instance(hdev, cp->handle); +- if (adv_instance) +- adv_instance->tx_power = rp->tx_power; +- } +- /* Update adv data as tx power is known now */ +- hci_update_adv_data(hdev, cp->handle); +- +- hci_dev_unlock(hdev); +- +- return rp->status; +-} +- + static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, + struct sk_buff *skb) + { +@@ -4153,8 +4119,6 @@ static const struct hci_cc { + HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, + hci_cc_le_read_num_adv_sets, + sizeof(struct hci_rp_le_read_num_supported_adv_sets)), +- HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, +- sizeof(struct hci_rp_le_set_ext_adv_params)), + HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, + hci_cc_le_set_ext_adv_enable), + HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, +@@ -6916,7 +6880,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, + bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); + + if (!ev->status) { ++ bis->state = BT_CONNECTED; + set_bit(HCI_CONN_BIG_SYNC, &bis->flags); ++ hci_debugfs_create_conn(bis); ++ hci_conn_add_sysfs(bis); + hci_iso_setup_path(bis); + } + } +diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c +index d602e9d8eff450..e1df1c62017d91 100644 +--- a/net/bluetooth/hci_sync.c ++++ b/net/bluetooth/hci_sync.c +@@ -1224,9 +1224,129 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + } + ++static int ++hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv, ++ const struct hci_cp_le_set_ext_adv_params *cp, ++ struct hci_rp_le_set_ext_adv_params *rp) ++{ ++ struct sk_buff *skb; ++ ++ skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp), ++ cp, HCI_CMD_TIMEOUT); ++ ++ /* If command return a status event, skb will be set to -ENODATA */ ++ if (skb == ERR_PTR(-ENODATA)) ++ return 0; ++ ++ if (IS_ERR(skb)) { ++ bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", ++ HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb)); ++ return PTR_ERR(skb); ++ } ++ ++ if (skb->len != sizeof(*rp)) { ++ bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u", ++ HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len); ++ kfree_skb(skb); ++ return -EIO; ++ } ++ ++ memcpy(rp, skb->data, sizeof(*rp)); ++ kfree_skb(skb); ++ ++ if (!rp->status) { ++ hdev->adv_addr_type = cp->own_addr_type; ++ if (!cp->handle) { ++ /* Store in hdev for instance 0 */ ++ hdev->adv_tx_power = rp->tx_power; ++ } else if (adv) { ++ adv->tx_power = rp->tx_power; ++ } ++ } ++ ++ return rp->status; ++} ++ ++static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) ++{ ++ struct { ++ struct hci_cp_le_set_ext_adv_data cp; ++ u8 data[HCI_MAX_EXT_AD_LENGTH]; ++ } pdu; ++ u8 len; ++ struct 
adv_info *adv = NULL; ++ int err; ++ ++ memset(&pdu, 0, sizeof(pdu)); ++ ++ if (instance) { ++ adv = hci_find_adv_instance(hdev, instance); ++ if (!adv || !adv->adv_data_changed) ++ return 0; ++ } ++ ++ len = eir_create_adv_data(hdev, instance, pdu.data); ++ ++ pdu.cp.length = len; ++ pdu.cp.handle = instance; ++ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; ++ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; ++ ++ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, ++ sizeof(pdu.cp) + len, &pdu.cp, ++ HCI_CMD_TIMEOUT); ++ if (err) ++ return err; ++ ++ /* Update data if the command succeed */ ++ if (adv) { ++ adv->adv_data_changed = false; ++ } else { ++ memcpy(hdev->adv_data, pdu.data, len); ++ hdev->adv_data_len = len; ++ } ++ ++ return 0; ++} ++ ++static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) ++{ ++ struct hci_cp_le_set_adv_data cp; ++ u8 len; ++ ++ memset(&cp, 0, sizeof(cp)); ++ ++ len = eir_create_adv_data(hdev, instance, cp.data); ++ ++ /* There's nothing to do if the data hasn't changed */ ++ if (hdev->adv_data_len == len && ++ memcmp(cp.data, hdev->adv_data, len) == 0) ++ return 0; ++ ++ memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); ++ hdev->adv_data_len = len; ++ ++ cp.length = len; ++ ++ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, ++ sizeof(cp), &cp, HCI_CMD_TIMEOUT); ++} ++ ++int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) ++{ ++ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) ++ return 0; ++ ++ if (ext_adv_capable(hdev)) ++ return hci_set_ext_adv_data_sync(hdev, instance); ++ ++ return hci_set_adv_data_sync(hdev, instance); ++} ++ + int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) + { + struct hci_cp_le_set_ext_adv_params cp; ++ struct hci_rp_le_set_ext_adv_params rp; + bool connectable; + u32 flags; + bdaddr_t random_addr; +@@ -1247,7 +1367,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) + * Command Disallowed error, so we must first disable the + * instance if it is active. 
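A brief aside on the wrapper-struct idiom used by hci_set_ext_adv_data_sync() above: declaring the fixed command header and its variable payload in one anonymous struct keeps them contiguous, so a single pointer and a computed length can be handed to the send path. A generic sketch with placeholder names (cmd_hdr, MAX_LEN, fill_payload, send_sync are illustrative):

struct {
	struct cmd_hdr cp;	/* fixed-size command header */
	u8 data[MAX_LEN];	/* variable payload, contiguous with cp */
} pdu;

memset(&pdu, 0, sizeof(pdu));
len = fill_payload(pdu.data);			/* stands in for eir_create_adv_data() */
pdu.cp.length = len;
err = send_sync(&pdu.cp, sizeof(pdu.cp) + len);	/* header plus used bytes only */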
+ */ +- if (adv && !adv->pending) { ++ if (adv) { + err = hci_disable_ext_adv_instance_sync(hdev, instance); + if (err) + return err; +@@ -1333,8 +1453,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) + cp.secondary_phy = HCI_ADV_PHY_1M; + } + +- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, +- sizeof(cp), &cp, HCI_CMD_TIMEOUT); ++ err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp); ++ if (err) ++ return err; ++ ++ /* Update adv data as tx power is known now */ ++ err = hci_set_ext_adv_data_sync(hdev, cp.handle); + if (err) + return err; + +@@ -1859,82 +1983,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + } + +-static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +-{ +- struct { +- struct hci_cp_le_set_ext_adv_data cp; +- u8 data[HCI_MAX_EXT_AD_LENGTH]; +- } pdu; +- u8 len; +- struct adv_info *adv = NULL; +- int err; +- +- memset(&pdu, 0, sizeof(pdu)); +- +- if (instance) { +- adv = hci_find_adv_instance(hdev, instance); +- if (!adv || !adv->adv_data_changed) +- return 0; +- } +- +- len = eir_create_adv_data(hdev, instance, pdu.data); +- +- pdu.cp.length = len; +- pdu.cp.handle = instance; +- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; +- pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; +- +- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, +- sizeof(pdu.cp) + len, &pdu.cp, +- HCI_CMD_TIMEOUT); +- if (err) +- return err; +- +- /* Update data if the command succeed */ +- if (adv) { +- adv->adv_data_changed = false; +- } else { +- memcpy(hdev->adv_data, pdu.data, len); +- hdev->adv_data_len = len; +- } +- +- return 0; +-} +- +-static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) +-{ +- struct hci_cp_le_set_adv_data cp; +- u8 len; +- +- memset(&cp, 0, sizeof(cp)); +- +- len = eir_create_adv_data(hdev, instance, cp.data); +- +- /* There's nothing to do if the data hasn't changed */ +- if (hdev->adv_data_len == len && +- memcmp(cp.data, hdev->adv_data, len) == 0) +- return 0; +- +- memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); +- hdev->adv_data_len = len; +- +- cp.length = len; +- +- return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, +- sizeof(cp), &cp, HCI_CMD_TIMEOUT); +-} +- +-int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) +-{ +- if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) +- return 0; +- +- if (ext_adv_capable(hdev)) +- return hci_set_ext_adv_data_sync(hdev, instance); +- +- return hci_set_adv_data_sync(hdev, instance); +-} +- + int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, + bool force) + { +@@ -6253,6 +6301,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, + struct hci_conn *conn) + { + struct hci_cp_le_set_ext_adv_params cp; ++ struct hci_rp_le_set_ext_adv_params rp; + int err; + bdaddr_t random_addr; + u8 own_addr_type; +@@ -6294,8 +6343,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, + if (err) + return err; + +- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, +- sizeof(cp), &cp, HCI_CMD_TIMEOUT); ++ err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp); ++ if (err) ++ return err; ++ ++ /* Update adv data as tx power is known now */ ++ err = hci_set_ext_adv_data_sync(hdev, cp.handle); + if (err) + return err; + +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index ff22060f9145f9..a4bbe959d1e25f 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -1132,7 +1132,7 @@ int tcp_sendmsg_locked(struct 
sock *sk, struct msghdr *msg, size_t size) + goto do_error; + + while (msg_data_left(msg)) { +- ssize_t copy = 0; ++ int copy = 0; + + skb = tcp_write_queue_tail(sk); + if (skb) +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 231fa4dc6cde4a..74f7f3e8d96083 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -3499,11 +3499,9 @@ static void addrconf_gre_config(struct net_device *dev) + + ASSERT_RTNL(); + +- idev = ipv6_find_idev(dev); +- if (IS_ERR(idev)) { +- pr_debug("%s: add_dev failed\n", __func__); ++ idev = addrconf_add_dev(dev); ++ if (IS_ERR(idev)) + return; +- } + + /* Generate the IPv6 link-local address using addrconf_addr_gen(), + * unless we have an IPv4 GRE device not bound to an IP address and +@@ -3517,9 +3515,6 @@ static void addrconf_gre_config(struct net_device *dev) + } + + add_v4_addrs(idev); +- +- if (dev->flags & IFF_POINTOPOINT) +- addrconf_add_mroute(dev); + } + #endif + +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 4aa2cbe9d6fa69..0a412d9a8e5fdb 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -387,7 +387,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) + WARN_ON(skb->sk != NULL); + skb->sk = sk; + skb->destructor = netlink_skb_destructor; +- atomic_add(skb->truesize, &sk->sk_rmem_alloc); + sk_mem_charge(sk, skb->truesize); + } + +@@ -1223,41 +1222,48 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size, + int netlink_attachskb(struct sock *sk, struct sk_buff *skb, + long *timeo, struct sock *ssk) + { ++ DECLARE_WAITQUEUE(wait, current); + struct netlink_sock *nlk; ++ unsigned int rmem; + + nlk = nlk_sk(sk); ++ rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); + +- if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || +- test_bit(NETLINK_S_CONGESTED, &nlk->state))) { +- DECLARE_WAITQUEUE(wait, current); +- if (!*timeo) { +- if (!ssk || netlink_is_kernel(ssk)) +- netlink_overrun(sk); +- sock_put(sk); +- kfree_skb(skb); +- return -EAGAIN; +- } +- +- __set_current_state(TASK_INTERRUPTIBLE); +- add_wait_queue(&nlk->wait, &wait); ++ if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) && ++ !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { ++ netlink_skb_set_owner_r(skb, sk); ++ return 0; ++ } + +- if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || +- test_bit(NETLINK_S_CONGESTED, &nlk->state)) && +- !sock_flag(sk, SOCK_DEAD)) +- *timeo = schedule_timeout(*timeo); ++ atomic_sub(skb->truesize, &sk->sk_rmem_alloc); + +- __set_current_state(TASK_RUNNING); +- remove_wait_queue(&nlk->wait, &wait); ++ if (!*timeo) { ++ if (!ssk || netlink_is_kernel(ssk)) ++ netlink_overrun(sk); + sock_put(sk); ++ kfree_skb(skb); ++ return -EAGAIN; ++ } + +- if (signal_pending(current)) { +- kfree_skb(skb); +- return sock_intr_errno(*timeo); +- } +- return 1; ++ __set_current_state(TASK_INTERRUPTIBLE); ++ add_wait_queue(&nlk->wait, &wait); ++ rmem = atomic_read(&sk->sk_rmem_alloc); ++ ++ if (((rmem && rmem + skb->truesize > READ_ONCE(sk->sk_rcvbuf)) || ++ test_bit(NETLINK_S_CONGESTED, &nlk->state)) && ++ !sock_flag(sk, SOCK_DEAD)) ++ *timeo = schedule_timeout(*timeo); ++ ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&nlk->wait, &wait); ++ sock_put(sk); ++ ++ if (signal_pending(current)) { ++ kfree_skb(skb); ++ return sock_intr_errno(*timeo); + } +- netlink_skb_set_owner_r(skb, sk); +- return 0; ++ ++ return 1; + } + + static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) +@@ -1317,6 +1323,7 @@ static int 
netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, + ret = -ECONNREFUSED; + if (nlk->netlink_rcv != NULL) { + ret = skb->len; ++ atomic_add(skb->truesize, &sk->sk_rmem_alloc); + netlink_skb_set_owner_r(skb, sk); + NETLINK_CB(skb).sk = ssk; + netlink_deliver_tap_kernel(sk, ssk, skb); +@@ -1393,13 +1400,19 @@ EXPORT_SYMBOL_GPL(netlink_strict_get_check); + static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) + { + struct netlink_sock *nlk = nlk_sk(sk); ++ unsigned int rmem, rcvbuf; + +- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && ++ rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); ++ rcvbuf = READ_ONCE(sk->sk_rcvbuf); ++ ++ if ((rmem == skb->truesize || rmem <= rcvbuf) && + !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { + netlink_skb_set_owner_r(skb, sk); + __netlink_sendskb(sk, skb); +- return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); ++ return rmem > (rcvbuf >> 1); + } ++ ++ atomic_sub(skb->truesize, &sk->sk_rmem_alloc); + return -1; + } + +@@ -2186,6 +2199,7 @@ static int netlink_dump(struct sock *sk, bool lock_taken) + struct netlink_ext_ack extack = {}; + struct netlink_callback *cb; + struct sk_buff *skb = NULL; ++ unsigned int rmem, rcvbuf; + size_t max_recvmsg_len; + struct module *module; + int err = -ENOBUFS; +@@ -2199,9 +2213,6 @@ static int netlink_dump(struct sock *sk, bool lock_taken) + goto errout_skb; + } + +- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) +- goto errout_skb; +- + /* NLMSG_GOODSIZE is small to avoid high order allocations being + * required, but it makes sense to _attempt_ a 16K bytes allocation + * to reduce number of system calls on dump operations, if user +@@ -2224,6 +2235,13 @@ static int netlink_dump(struct sock *sk, bool lock_taken) + if (!skb) + goto errout_skb; + ++ rcvbuf = READ_ONCE(sk->sk_rcvbuf); ++ rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); ++ if (rmem != skb->truesize && rmem >= rcvbuf) { ++ atomic_sub(skb->truesize, &sk->sk_rmem_alloc); ++ goto errout_skb; ++ } ++ + /* Trim skb to allocated size. User is expected to provide buffer as + * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at + * netlink_recvmsg())). dump will pack as many smaller messages as +diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c +index 0f5a1d77b890f8..773bdb2e37dafd 100644 +--- a/net/rxrpc/call_accept.c ++++ b/net/rxrpc/call_accept.c +@@ -149,6 +149,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, + + id_in_use: + write_unlock(&rx->call_lock); ++ rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT); + rxrpc_cleanup_call(call); + _leave(" = -EBADSLT"); + return -EBADSLT; +@@ -253,6 +254,9 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, + unsigned short call_tail, conn_tail, peer_tail; + unsigned short call_count, conn_count; + ++ if (!b) ++ return NULL; ++ + /* #calls >= #conns >= #peers must hold true. 
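The netlink hunks above move receive-memory accounting to an optimistic charge-then-revert scheme, so concurrent senders cannot all slip past a stale read-then-add check. A condensed sketch of the idiom (the error value is illustrative):

rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
/* the rmem == skb->truesize clause guarantees that at least one skb
 * can always be queued, even with a tiny sk_rcvbuf */
if (rmem == skb->truesize || rmem <= READ_ONCE(sk->sk_rcvbuf)) {
	netlink_skb_set_owner_r(skb, sk);	/* charge stays with the skb */
	return 0;
}
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);	/* revert the charge */
return -EAGAIN;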
*/ + call_head = smp_load_acquire(&b->call_backlog_head); + call_tail = b->call_backlog_tail; +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index 282423106f15d9..a300e8c1b53aaa 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -334,17 +334,22 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) + return q; + } + +-static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) ++static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid, ++ struct netlink_ext_ack *extack) + { + unsigned long cl; + const struct Qdisc_class_ops *cops = p->ops->cl_ops; + +- if (cops == NULL) +- return NULL; ++ if (cops == NULL) { ++ NL_SET_ERR_MSG(extack, "Parent qdisc is not classful"); ++ return ERR_PTR(-EOPNOTSUPP); ++ } + cl = cops->find(p, classid); + +- if (cl == 0) +- return NULL; ++ if (cl == 0) { ++ NL_SET_ERR_MSG(extack, "Specified class not found"); ++ return ERR_PTR(-ENOENT); ++ } + return cops->leaf(p, cl); + } + +@@ -1497,7 +1502,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid"); + return -ENOENT; + } +- q = qdisc_leaf(p, clid); ++ q = qdisc_leaf(p, clid, extack); + } else if (dev_ingress_queue(dev)) { + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); + } +@@ -1508,6 +1513,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device"); + return -ENOENT; + } ++ if (IS_ERR(q)) ++ return PTR_ERR(q); + + if (tcm->tcm_handle && q->handle != tcm->tcm_handle) { + NL_SET_ERR_MSG(extack, "Invalid handle"); +@@ -1601,7 +1608,9 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + NL_SET_ERR_MSG(extack, "Failed to find specified qdisc"); + return -ENOENT; + } +- q = qdisc_leaf(p, clid); ++ q = qdisc_leaf(p, clid, extack); ++ if (IS_ERR(q)) ++ return PTR_ERR(q); + } else if (dev_ingress_queue_create(dev)) { + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); + } +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c +index 8ee0c07d00e9bb..ffe577bf6b5155 100644 +--- a/net/tipc/topsrv.c ++++ b/net/tipc/topsrv.c +@@ -704,8 +704,10 @@ static void tipc_topsrv_stop(struct net *net) + for (id = 0; srv->idr_in_use; id++) { + con = idr_find(&srv->conn_idr, id); + if (con) { ++ conn_get(con); + spin_unlock_bh(&srv->idr_lock); + tipc_conn_close(con); ++ conn_put(con); + spin_lock_bh(&srv->idr_lock); + } + } +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index f8f1a49689da6c..f20b117e5255ef 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -406,6 +406,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept); + + static bool vsock_use_local_transport(unsigned int remote_cid) + { ++ lockdep_assert_held(&vsock_register_mutex); ++ + if (!transport_local) + return false; + +@@ -463,6 +465,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) + + remote_flags = vsk->remote_addr.svm_flags; + ++ mutex_lock(&vsock_register_mutex); ++ + switch (sk->sk_type) { + case SOCK_DGRAM: + new_transport = transport_dgram; +@@ -478,12 +482,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) + new_transport = transport_h2g; + break; + default: +- return -ESOCKTNOSUPPORT; ++ ret = -ESOCKTNOSUPPORT; ++ goto err; + } + + if (vsk->transport) { +- if (vsk->transport == new_transport) +- return 0; ++ if (vsk->transport == new_transport) { ++ ret = 0; ++ goto err; ++ } + + /* transport->release() must be 
called with sock lock acquired. + * This path can only be taken during vsock_connect(), where we +@@ -507,8 +514,16 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) + /* We increase the module refcnt to prevent the transport unloading + * while there are open sockets assigned to it. + */ +- if (!new_transport || !try_module_get(new_transport->module)) +- return -ENODEV; ++ if (!new_transport || !try_module_get(new_transport->module)) { ++ ret = -ENODEV; ++ goto err; ++ } ++ ++ /* It's safe to release the mutex after a successful try_module_get(). ++ * Whichever transport `new_transport` points at, it won't go away until ++ * the last module_put() below or in vsock_deassign_transport(). ++ */ ++ mutex_unlock(&vsock_register_mutex); + + if (sk->sk_type == SOCK_SEQPACKET) { + if (!new_transport->seqpacket_allow || +@@ -527,12 +542,31 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) + vsk->transport = new_transport; + + return 0; ++err: ++ mutex_unlock(&vsock_register_mutex); ++ return ret; + } + EXPORT_SYMBOL_GPL(vsock_assign_transport); + ++/* ++ * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks. ++ * Otherwise we may race with module removal. Do not use on `vsk->transport`. ++ */ ++static u32 vsock_registered_transport_cid(const struct vsock_transport **transport) ++{ ++ u32 cid = VMADDR_CID_ANY; ++ ++ mutex_lock(&vsock_register_mutex); ++ if (*transport) ++ cid = (*transport)->get_local_cid(); ++ mutex_unlock(&vsock_register_mutex); ++ ++ return cid; ++} ++ + bool vsock_find_cid(unsigned int cid) + { +- if (transport_g2h && cid == transport_g2h->get_local_cid()) ++ if (cid == vsock_registered_transport_cid(&transport_g2h)) + return true; + + if (transport_h2g && cid == VMADDR_CID_HOST) +@@ -2391,18 +2425,19 @@ static long vsock_dev_do_ioctl(struct file *filp, + unsigned int cmd, void __user *ptr) + { + u32 __user *p = ptr; +- u32 cid = VMADDR_CID_ANY; + int retval = 0; ++ u32 cid; + + switch (cmd) { + case IOCTL_VM_SOCKETS_GET_LOCAL_CID: + /* To be compatible with the VMCI behavior, we prioritize the + * guest CID instead of well-know host CID (VMADDR_CID_HOST). + */ +- if (transport_g2h) +- cid = transport_g2h->get_local_cid(); +- else if (transport_h2g) +- cid = transport_h2g->get_local_cid(); ++ cid = vsock_registered_transport_cid(&transport_g2h); ++ if (cid == VMADDR_CID_ANY) ++ cid = vsock_registered_transport_cid(&transport_h2g); ++ if (cid == VMADDR_CID_ANY) ++ cid = vsock_registered_transport_cid(&transport_local); + + if (put_user(cid, p) != 0) + retval = -EFAULT; +diff --git a/net/wireless/util.c b/net/wireless/util.c +index 7acd8d0db61a76..24e5af65da58ea 100644 +--- a/net/wireless/util.c ++++ b/net/wireless/util.c +@@ -826,6 +826,52 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr) + } + EXPORT_SYMBOL(ieee80211_is_valid_amsdu); + ++ ++/* ++ * Detects if an MSDU frame was maliciously converted into an A-MSDU ++ * frame by an adversary. This is done by parsing the received frame ++ * as if it were a regular MSDU, even though the A-MSDU flag is set. ++ * ++ * For non-mesh interfaces, detection involves checking whether the ++ * payload, when interpreted as an MSDU, begins with a valid RFC1042 ++ * header. This is done by comparing the A-MSDU subheader's destination ++ * address to the start of the RFC1042 header. 
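Concretely, the non-mesh comparison described above reduces to a 6-byte match against the LLC/SNAP (RFC1042) prefix. A standalone sketch, with the helper name invented for illustration:

static const u8 rfc1042_prefix[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* Returns true if the bytes parsed as an A-MSDU destination address are
 * really the start of an MSDU's LLC/SNAP header, i.e. the A-MSDU flag
 * was forged onto a plain MSDU and the frame should be purged. */
static bool amsdu_flag_was_forged(const struct ethhdr *eth)
{
	return memcmp(eth->h_dest, rfc1042_prefix, ETH_ALEN) == 0;
}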
++ * ++ * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field ++ * and an optional variable-length Mesh Address Extension field before ++ * the RFC1042 header. The position of the RFC1042 header must therefore ++ * be calculated based on the mesh header length. ++ * ++ * Since this function intentionally parses an A-MSDU frame as an MSDU, ++ * it only assumes that the A-MSDU subframe header is present, and ++ * beyond this it performs its own bounds checks under the assumption ++ * that the frame is instead parsed as a non-aggregated MSDU. ++ */ ++static bool ++is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb, ++ enum nl80211_iftype iftype) ++{ ++ int offset; ++ ++ /* Non-mesh case can be directly compared */ ++ if (iftype != NL80211_IFTYPE_MESH_POINT) ++ return ether_addr_equal(eth->h_dest, rfc1042_header); ++ ++ offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]); ++ if (offset == 6) { ++ /* Mesh case with empty address extension field */ ++ return ether_addr_equal(eth->h_source, rfc1042_header); ++ } else if (offset + ETH_ALEN <= skb->len) { ++ /* Mesh case with non-empty address extension field */ ++ u8 temp[ETH_ALEN]; ++ ++ skb_copy_bits(skb, offset, temp, ETH_ALEN); ++ return ether_addr_equal(temp, rfc1042_header); ++ } ++ ++ return false; ++} ++ + void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, + const u8 *addr, enum nl80211_iftype iftype, + const unsigned int extra_headroom, +@@ -867,8 +913,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, + /* the last MSDU has no padding */ + if (subframe_len > remaining) + goto purge; +- /* mitigate A-MSDU aggregation injection attacks */ +- if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header)) ++ /* mitigate A-MSDU aggregation injection attacks, to be ++ * checked when processing first subframe (offset == 0). 
++ */ ++ if (offset == 0 && is_amsdu_aggregation_attack(&hdr.eth, skb, iftype)) + goto purge; + + offset += sizeof(struct ethhdr); +diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in +index e810e0c27ff18d..866c13468ebf82 100644 +--- a/scripts/gdb/linux/constants.py.in ++++ b/scripts/gdb/linux/constants.py.in +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -93,6 +94,12 @@ LX_GDBPARSED(RADIX_TREE_MAP_SIZE) + LX_GDBPARSED(RADIX_TREE_MAP_SHIFT) + LX_GDBPARSED(RADIX_TREE_MAP_MASK) + ++/* linux/maple_tree.h */ ++LX_VALUE(MAPLE_NODE_SLOTS) ++LX_VALUE(MAPLE_RANGE64_SLOTS) ++LX_VALUE(MAPLE_ARANGE64_SLOTS) ++LX_GDBPARSED(MAPLE_NODE_MASK) ++ + /* linux/vmalloc.h */ + LX_VALUE(VM_IOREMAP) + LX_VALUE(VM_ALLOC) +diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py +index ef478e273791f3..b371a62213a005 100644 +--- a/scripts/gdb/linux/interrupts.py ++++ b/scripts/gdb/linux/interrupts.py +@@ -7,7 +7,7 @@ import gdb + from linux import constants + from linux import cpus + from linux import utils +-from linux import radixtree ++from linux import mapletree + + irq_desc_type = utils.CachedType("struct irq_desc") + +@@ -23,12 +23,12 @@ def irqd_is_level(desc): + def show_irq_desc(prec, irq): + text = "" + +- desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq) ++ desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq) + if desc is None: + return text + +- desc = desc.cast(irq_desc_type.get_type()) +- if desc is None: ++ desc = desc.cast(irq_desc_type.get_type().pointer()) ++ if desc == 0: + return text + + if irq_settings_is_hidden(desc): +@@ -110,7 +110,7 @@ def x86_show_mce(prec, var, pfx, desc): + pvar = gdb.parse_and_eval(var) + text = "%*s: " % (prec, pfx) + for cpu in cpus.each_online_cpu(): +- text += "%10u " % (cpus.per_cpu(pvar, cpu)) ++ text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference()) + text += " %s\n" % (desc) + return text + +@@ -142,7 +142,7 @@ def x86_show_interupts(prec): + + if constants.LX_CONFIG_X86_MCE: + text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions") +- text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") ++ text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") + + text += show_irq_err_count(prec) + +@@ -221,8 +221,8 @@ class LxInterruptList(gdb.Command): + gdb.write("CPU%-8d" % cpu) + gdb.write("\n") + +- if utils.gdb_eval_or_none("&irq_desc_tree") is None: +- return ++ if utils.gdb_eval_or_none("&sparse_irqs") is None: ++ raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?") + + for irq in range(nr_irqs): + gdb.write(show_irq_desc(prec, irq)) +diff --git a/scripts/gdb/linux/mapletree.py b/scripts/gdb/linux/mapletree.py +new file mode 100644 +index 00000000000000..d52d51c0a03fcb +--- /dev/null ++++ b/scripts/gdb/linux/mapletree.py +@@ -0,0 +1,252 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Maple tree helpers ++# ++# Copyright (c) 2025 Broadcom ++# ++# Authors: ++# Florian Fainelli ++ ++import gdb ++ ++from linux import utils ++from linux import constants ++from linux import xarray ++ ++maple_tree_root_type = utils.CachedType("struct maple_tree") ++maple_node_type = utils.CachedType("struct maple_node") ++maple_enode_type = utils.CachedType("void") ++ ++maple_dense = 0 ++maple_leaf_64 = 1 ++maple_range_64 = 2 ++maple_arange_64 = 3 ++ ++class Mas(object): ++ ma_active = 0 ++ ma_start = 1 ++ ma_root = 2 ++ ma_none = 3 ++ ma_pause 
= 4 ++ ma_overflow = 5 ++ ma_underflow = 6 ++ ma_error = 7 ++ ++ def __init__(self, mt, first, end): ++ if mt.type == maple_tree_root_type.get_type().pointer(): ++ self.tree = mt.dereference() ++ elif mt.type != maple_tree_root_type.get_type(): ++ raise gdb.GdbError("must be {} not {}" ++ .format(maple_tree_root_type.get_type().pointer(), mt.type)) ++ self.tree = mt ++ self.index = first ++ self.last = end ++ self.node = None ++ self.status = self.ma_start ++ self.min = 0 ++ self.max = -1 ++ ++ def is_start(self): ++ # mas_is_start() ++ return self.status == self.ma_start ++ ++ def is_ptr(self): ++ # mas_is_ptr() ++ return self.status == self.ma_root ++ ++ def is_none(self): ++ # mas_is_none() ++ return self.status == self.ma_none ++ ++ def root(self): ++ # mas_root() ++ return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer()) ++ ++ def start(self): ++ # mas_start() ++ if self.is_start() is False: ++ return None ++ ++ self.min = 0 ++ self.max = ~0 ++ ++ while True: ++ self.depth = 0 ++ root = self.root() ++ if xarray.xa_is_node(root): ++ self.depth = 0 ++ self.status = self.ma_active ++ self.node = mte_safe_root(root) ++ self.offset = 0 ++ if mte_dead_node(self.node) is True: ++ continue ++ ++ return None ++ ++ self.node = None ++ # Empty tree ++ if root is None: ++ self.status = self.ma_none ++ self.offset = constants.LX_MAPLE_NODE_SLOTS ++ return None ++ ++ # Single entry tree ++ self.status = self.ma_root ++ self.offset = constants.LX_MAPLE_NODE_SLOTS ++ ++ if self.index != 0: ++ return None ++ ++ return root ++ ++ return None ++ ++ def reset(self): ++ # mas_reset() ++ self.status = self.ma_start ++ self.node = None ++ ++def mte_safe_root(node): ++ if node.type != maple_enode_type.get_type().pointer(): ++ raise gdb.GdbError("{} must be {} not {}" ++ .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type)) ++ ulong_type = utils.get_ulong_type() ++ indirect_ptr = node.cast(ulong_type) & ~0x2 ++ val = indirect_ptr.cast(maple_enode_type.get_type().pointer()) ++ return val ++ ++def mte_node_type(entry): ++ ulong_type = utils.get_ulong_type() ++ val = None ++ if entry.type == maple_enode_type.get_type().pointer(): ++ val = entry.cast(ulong_type) ++ elif entry.type == ulong_type: ++ val = entry ++ else: ++ raise gdb.GdbError("{} must be {} not {}" ++ .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type)) ++ return (val >> 0x3) & 0xf ++ ++def ma_dead_node(node): ++ if node.type != maple_node_type.get_type().pointer(): ++ raise gdb.GdbError("{} must be {} not {}" ++ .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type)) ++ ulong_type = utils.get_ulong_type() ++ parent = node['parent'] ++ indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK ++ return indirect_ptr == node ++ ++def mte_to_node(enode): ++ ulong_type = utils.get_ulong_type() ++ if enode.type == maple_enode_type.get_type().pointer(): ++ indirect_ptr = enode.cast(ulong_type) ++ elif enode.type == ulong_type: ++ indirect_ptr = enode ++ else: ++ raise gdb.GdbError("{} must be {} not {}" ++ .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) ++ indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK ++ return indirect_ptr.cast(maple_node_type.get_type().pointer()) ++ ++def mte_dead_node(enode): ++ if enode.type != maple_enode_type.get_type().pointer(): ++ raise gdb.GdbError("{} must be {} not {}" ++ .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) ++ 
node = mte_to_node(enode) ++ return ma_dead_node(node) ++ ++def ma_is_leaf(tp): ++ result = tp < maple_range_64 ++ return tp < maple_range_64 ++ ++def mt_pivots(t): ++ if t == maple_dense: ++ return 0 ++ elif t == maple_leaf_64 or t == maple_range_64: ++ return constants.LX_MAPLE_RANGE64_SLOTS - 1 ++ elif t == maple_arange_64: ++ return constants.LX_MAPLE_ARANGE64_SLOTS - 1 ++ ++def ma_pivots(node, t): ++ if node.type != maple_node_type.get_type().pointer(): ++ raise gdb.GdbError("{}: must be {} not {}" ++ .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type)) ++ if t == maple_arange_64: ++ return node['ma64']['pivot'] ++ elif t == maple_leaf_64 or t == maple_range_64: ++ return node['mr64']['pivot'] ++ else: ++ return None ++ ++def ma_slots(node, tp): ++ if node.type != maple_node_type.get_type().pointer(): ++ raise gdb.GdbError("{}: must be {} not {}" ++ .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type)) ++ if tp == maple_arange_64: ++ return node['ma64']['slot'] ++ elif tp == maple_range_64 or tp == maple_leaf_64: ++ return node['mr64']['slot'] ++ elif tp == maple_dense: ++ return node['slot'] ++ else: ++ return None ++ ++def mt_slot(mt, slots, offset): ++ ulong_type = utils.get_ulong_type() ++ return slots[offset].cast(ulong_type) ++ ++def mtree_lookup_walk(mas): ++ ulong_type = utils.get_ulong_type() ++ n = mas.node ++ ++ while True: ++ node = mte_to_node(n) ++ tp = mte_node_type(n) ++ pivots = ma_pivots(node, tp) ++ end = mt_pivots(tp) ++ offset = 0 ++ while True: ++ if pivots[offset] >= mas.index: ++ break ++ if offset >= end: ++ break ++ offset += 1 ++ ++ slots = ma_slots(node, tp) ++ n = mt_slot(mas.tree, slots, offset) ++ if ma_dead_node(node) is True: ++ mas.reset() ++ return None ++ break ++ ++ if ma_is_leaf(tp) is True: ++ break ++ ++ return n ++ ++def mtree_load(mt, index): ++ ulong_type = utils.get_ulong_type() ++ # MT_STATE(...) 
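For reference, the pointer encoding these gdb helpers decode, written as C. The shift and mask values mirror mte_node_type() and mte_to_node() above; the 255UL node mask assumes 256-byte-aligned maple nodes, as in lib/maple_tree.c:

#define MT_TYPE_SHIFT	0x3
#define MT_TYPE_MASK	0xf
#define MT_NODE_MASK	255UL		/* assumes 256-byte aligned nodes */

static inline unsigned int enode_type(unsigned long enode)
{
	return (enode >> MT_TYPE_SHIFT) & MT_TYPE_MASK;
}

static inline struct maple_node *enode_to_node(unsigned long enode)
{
	return (struct maple_node *)(enode & ~MT_NODE_MASK);
}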
++ mas = Mas(mt, index, index) ++ entry = None ++ ++ while True: ++ entry = mas.start() ++ if mas.is_none(): ++ return None ++ ++ if mas.is_ptr(): ++ if index != 0: ++ entry = None ++ return entry ++ ++ entry = mtree_lookup_walk(mas) ++ if entry is None and mas.is_start(): ++ continue ++ else: ++ break ++ ++ if xarray.xa_is_zero(entry): ++ return None ++ ++ return entry +diff --git a/scripts/gdb/linux/xarray.py b/scripts/gdb/linux/xarray.py +new file mode 100644 +index 00000000000000..f4477b5def75fc +--- /dev/null ++++ b/scripts/gdb/linux/xarray.py +@@ -0,0 +1,28 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Xarray helpers ++# ++# Copyright (c) 2025 Broadcom ++# ++# Authors: ++# Florian Fainelli ++ ++import gdb ++ ++from linux import utils ++from linux import constants ++ ++def xa_is_internal(entry): ++ ulong_type = utils.get_ulong_type() ++ return ((entry.cast(ulong_type) & 3) == 2) ++ ++def xa_mk_internal(v): ++ return ((v << 2) | 2) ++ ++def xa_is_zero(entry): ++ ulong_type = utils.get_ulong_type() ++ return entry.cast(ulong_type) == xa_mk_internal(257) ++ ++def xa_is_node(entry): ++ ulong_type = utils.get_ulong_type() ++ return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096) +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 0d367cec03adef..1c2059e37fdab6 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10151,6 +10151,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x897d, "HP mt440 Mobile Thin Client U74", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4), ++ SND_PCI_QUIRK(0x103c, 0x898a, "HP Pavilion 15-eg100", ALC287_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index 429e61d47ffbbe..66ef8f4fd02cd4 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -346,6 +346,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "RB"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Nitro ANV15-41"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c +index d3db89c93b331d..25ee7477709e76 100644 +--- a/sound/soc/codecs/cs35l56-shared.c ++++ b/sound/soc/codecs/cs35l56-shared.c +@@ -661,7 +661,7 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base) + break; + default: + dev_err(cs35l56_base->dev, "Unknown device %x\n", devid); +- return ret; ++ return -ENODEV; + } + + ret = regmap_read(cs35l56_base->regmap, CS35L56_DSP_RESTRICT_STS1, &secured); +diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c +index b793263291dc8d..72dc23c994bcd6 100644 +--- a/sound/soc/fsl/fsl_asrc.c ++++ b/sound/soc/fsl/fsl_asrc.c +@@ -517,7 +517,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) + regmap_update_bits(asrc->regmap, REG_ASRCTR, + ASRCTR_ATSi_MASK(index), ASRCTR_ATS(index)); + regmap_update_bits(asrc->regmap, REG_ASRCTR, +- 
ASRCTR_USRi_MASK(index), 0); ++ ASRCTR_IDRi_MASK(index) | ASRCTR_USRi_MASK(index), ++ ASRCTR_USR(index)); + + /* Set the input and output clock sources */ + regmap_update_bits(asrc->regmap, REG_ASRCSR, +diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h +index 1d111350197f31..76f9cad9fb62b1 100644 +--- a/tools/arch/x86/include/asm/msr-index.h ++++ b/tools/arch/x86/include/asm/msr-index.h +@@ -550,6 +550,7 @@ + #define MSR_AMD64_OSVW_STATUS 0xc0010141 + #define MSR_AMD_PPIN_CTL 0xc00102f0 + #define MSR_AMD_PPIN 0xc00102f1 ++#define MSR_AMD64_CPUID_FN_7 0xc0011002 + #define MSR_AMD64_CPUID_FN_1 0xc0011004 + #define MSR_AMD64_LS_CFG 0xc0011020 + #define MSR_AMD64_DC_CFG 0xc0011022 +diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile +index dad79ede4e0ae0..4f869fff28fe43 100644 +--- a/tools/build/feature/Makefile ++++ b/tools/build/feature/Makefile +@@ -80,7 +80,30 @@ FILES= \ + + FILES := $(addprefix $(OUTPUT),$(FILES)) + +-PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config ++# Some distros provide the command $(CROSS_COMPILE)pkg-config for ++# searching packges installed with Multiarch. Use it for cross ++# compilation if it is existed. ++ifneq (, $(shell which $(CROSS_COMPILE)pkg-config)) ++ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config ++else ++ PKG_CONFIG ?= pkg-config ++ ++ # PKG_CONFIG_PATH or PKG_CONFIG_LIBDIR, alongside PKG_CONFIG_SYSROOT_DIR ++ # for modified system root, are required for the cross compilation. ++ # If these PKG_CONFIG environment variables are not set, Multiarch library ++ # paths are used instead. ++ ifdef CROSS_COMPILE ++ ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),) ++ CROSS_ARCH = $(shell $(CC) -dumpmachine) ++ PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/share/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/share/pkgconfig/ ++ export PKG_CONFIG_LIBDIR ++ endif ++ endif ++endif + + all: $(FILES) + +diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h +index 5a37ccbec54fbc..f61a01dd7eb7c7 100644 +--- a/tools/include/linux/kallsyms.h ++++ b/tools/include/linux/kallsyms.h +@@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr, + return NULL; + } + ++#ifdef HAVE_BACKTRACE_SUPPORT + #include + #include + static inline void print_ip_sym(const char *loglvl, unsigned long ip) +@@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip) + + free(name); + } ++#else ++static inline void print_ip_sym(const char *loglvl, unsigned long ip) {} ++#endif + + #endif +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf +index b97224a8a65b9d..ff72a0d9dea0c3 100644 +--- a/tools/perf/Makefile.perf ++++ b/tools/perf/Makefile.perf +@@ -188,7 +188,32 @@ HOSTLD ?= ld + HOSTAR ?= ar + CLANG ?= clang + +-PKG_CONFIG = $(CROSS_COMPILE)pkg-config ++# Some distros provide the command $(CROSS_COMPILE)pkg-config for ++# searching packges installed with Multiarch. Use it for cross ++# compilation if it is existed. ++ifneq (, $(shell which $(CROSS_COMPILE)pkg-config)) ++ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config ++else ++ PKG_CONFIG ?= pkg-config ++ ++ # PKG_CONFIG_PATH or PKG_CONFIG_LIBDIR, alongside PKG_CONFIG_SYSROOT_DIR ++ # for modified system root, is required for the cross compilation. 
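As a concrete (hypothetical) example of the fallback logic above: with CROSS_COMPILE=aarch64-linux-gnu- and no aarch64-linux-gnu-pkg-config on PATH, CROSS_ARCH becomes the output of $(CC) -dumpmachine (e.g. aarch64-linux-gnu), and plain pkg-config is pointed at Multiarch directories such as /usr/lib/aarch64-linux-gnu/pkgconfig/ rather than the host's default search paths.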
++ # If these PKG_CONFIG environment variables are not set, Multiarch library ++ # paths are used instead. ++ ifdef CROSS_COMPILE ++ ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),) ++ CROSS_ARCH = $(shell $(CC) -dumpmachine) ++ PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/share/pkgconfig/ ++ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/share/pkgconfig/ ++ export PKG_CONFIG_LIBDIR ++ $(warning Missing PKG_CONFIG_LIBDIR, PKG_CONFIG_PATH and PKG_CONFIG_SYSROOT_DIR for cross compilation,) ++ $(warning set PKG_CONFIG_LIBDIR for using Multiarch libs.) ++ endif ++ endif ++endif + + RM = rm -f + LN = ln -f +diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c +index fda7589c50236c..0921939532c6c2 100644 +--- a/tools/testing/selftests/bpf/test_lru_map.c ++++ b/tools/testing/selftests/bpf/test_lru_map.c +@@ -138,6 +138,18 @@ static int sched_next_online(int pid, int *next_to_try) + return ret; + } + ++/* Derive target_free from map_size, same as bpf_common_lru_populate */ ++static unsigned int __tgt_size(unsigned int map_size) ++{ ++ return (map_size / nr_cpus) / 2; ++} ++ ++/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */ ++static unsigned int __map_size(unsigned int tgt_free) ++{ ++ return tgt_free * nr_cpus * 2; ++} ++ + /* Size of the LRU map is 2 + * Add key=1 (+1 key) + * Add key=2 (+1 key) +@@ -231,11 +243,11 @@ static void test_lru_sanity0(int map_type, int map_flags) + printf("Pass\n"); + } + +-/* Size of the LRU map is 1.5*tgt_free +- * Insert 1 to tgt_free (+tgt_free keys) +- * Lookup 1 to tgt_free/2 +- * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys) +- * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU ++/* Verify that unreferenced elements are recycled before referenced ones. ++ * Insert elements. ++ * Reference a subset of these. ++ * Insert more, enough to trigger recycling. ++ * Verify that unreferenced are recycled. 
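To make the size relationship above concrete, a worked example assuming nr_cpus = 4 and taking LOCAL_FREE_TARGET as 128, its value in bpf_lru_list.h:

unsigned int nr_cpus = 4;			/* assumed CPU count */
unsigned int map_size = 1024;
unsigned int tgt = (map_size / nr_cpus) / 2;	/* 128, as __tgt_size() computes */
/* kernel side: clamp(128, 1, LOCAL_FREE_TARGET) yields target_free = 128 */
unsigned int inverse = tgt * nr_cpus * 2;	/* __map_size(128) == 1024 */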
+ */
+ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ {
+@@ -257,7 +269,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ batch_size = tgt_free / 2;
+ assert(batch_size * 2 == tgt_free);
+
+- map_size = tgt_free + batch_size;
++ map_size = __map_size(tgt_free) + batch_size;
+ lru_map_fd = create_map(map_type, map_flags, map_size);
+ assert(lru_map_fd != -1);
+
+@@ -266,13 +278,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+
+ value[0] = 1234;
+
+- /* Insert 1 to tgt_free (+tgt_free keys) */
+- end_key = 1 + tgt_free;
++ /* Insert map_size - batch_size keys */
++ end_key = 1 + __map_size(tgt_free);
+ for (key = 1; key < end_key; key++)
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+
+- /* Lookup 1 to tgt_free/2 */
++ /* Lookup 1 to batch_size */
+ end_key = 1 + batch_size;
+ for (key = 1; key < end_key; key++) {
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
+@@ -280,12 +292,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ BPF_NOEXIST));
+ }
+
+- /* Insert 1+tgt_free to 2*tgt_free
+- * => 1+tgt_free/2 to LOCALFREE_TARGET will be
++ /* Insert another map_size - batch_size keys
++ * Map will contain 1 to batch_size plus these latest, i.e.,
++ * => previous 1+batch_size to map_size - batch_size will have been
+ * removed by LRU
+ */
+- key = 1 + tgt_free;
+- end_key = key + tgt_free;
++ key = 1 + __map_size(tgt_free);
++ end_key = key + __map_size(tgt_free);
+ for (; key < end_key; key++) {
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+@@ -301,17 +314,8 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
+ printf("Pass\n");
+ }
+
+-/* Size of the LRU map 1.5 * tgt_free
+- * Insert 1 to tgt_free (+tgt_free keys)
+- * Update 1 to tgt_free/2
+- *   => The original 1 to tgt_free/2 will be removed due to
+- *      the LRU shrink process
+- * Re-insert 1 to tgt_free/2 again and do a lookup immeidately
+- * Insert 1+tgt_free to tgt_free*3/2
+- * Insert 1+tgt_free*3/2 to tgt_free*5/2
+- *   => Key 1+tgt_free to tgt_free*3/2
+- *      will be removed from LRU because it has never
+- *      been lookup and ref bit is not set
++/* Verify that insertions exceeding map size will recycle the oldest.
++ * Verify that unreferenced elements are recycled before referenced.
+ */
+ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ {
+@@ -334,7 +338,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ batch_size = tgt_free / 2;
+ assert(batch_size * 2 == tgt_free);
+
+- map_size = tgt_free + batch_size;
++ map_size = __map_size(tgt_free) + batch_size;
+ lru_map_fd = create_map(map_type, map_flags, map_size);
+ assert(lru_map_fd != -1);
+
+@@ -343,8 +347,8 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+
+ value[0] = 1234;
+
+- /* Insert 1 to tgt_free (+tgt_free keys) */
+- end_key = 1 + tgt_free;
++ /* Insert map_size - batch_size keys */
++ end_key = 1 + __map_size(tgt_free);
+ for (key = 1; key < end_key; key++)
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+@@ -357,8 +361,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ * shrink the inactive list to get tgt_free
+ * number of free nodes.
+ *
+- * Hence, the oldest key 1 to tgt_free/2
+- * are removed from the LRU list.
++ * Hence, the oldest key is removed from the LRU list.
+ */
+ key = 1;
+ if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+@@ -370,8 +373,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ BPF_EXIST));
+ }
+
+- /* Re-insert 1 to tgt_free/2 again and do a lookup
+- * immeidately.
++ /* Re-insert 1 to batch_size again and do a lookup immediately.
+ */
+ end_key = 1 + batch_size;
+ value[0] = 4321;
+@@ -387,17 +389,18 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+
+ value[0] = 1234;
+
+- /* Insert 1+tgt_free to tgt_free*3/2 */
+- end_key = 1 + tgt_free + batch_size;
+- for (key = 1 + tgt_free; key < end_key; key++)
++ /* Insert batch_size new elements */
++ key = 1 + __map_size(tgt_free);
++ end_key = key + batch_size;
++ for (; key < end_key; key++)
+ /* These newly added but not referenced keys will be
+ * gone during the next LRU shrink.
+ */
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+
+- /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
+- end_key = key + tgt_free;
++ /* Insert map_size - batch_size elements */
++ end_key += __map_size(tgt_free);
+ for (; key < end_key; key++) {
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+@@ -413,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
+ printf("Pass\n");
+ }
+
+-/* Size of the LRU map is 2*tgt_free
+- * It is to test the active/inactive list rotation
+- * Insert 1 to 2*tgt_free (+2*tgt_free keys)
+- * Lookup key 1 to tgt_free*3/2
+- * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
+- *   => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
++/* Test the active/inactive list rotation
++ *
++ * Fill the whole map, deplete the free list.
++ * Reference all except the last lru->target_free elements.
++ * Insert lru->target_free new elements. This triggers one shrink.
++ * Verify that the non-referenced elements are replaced.
+ */
+ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
+ {
+@@ -437,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
+
+ assert(sched_next_online(0, &next_cpu) != -1);
+
+- batch_size = tgt_free / 2;
+- assert(batch_size * 2 == tgt_free);
++ batch_size = __tgt_size(tgt_free);
+
+ map_size = tgt_free * 2;
+ lru_map_fd = create_map(map_type, map_flags, map_size);
+@@ -449,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
+
+ value[0] = 1234;
+
+- /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
+- end_key = 1 + (2 * tgt_free);
++ /* Fill the map */
++ end_key = 1 + map_size;
+ for (key = 1; key < end_key; key++)
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+
+- /* Lookup key 1 to tgt_free*3/2 */
+- end_key = tgt_free + batch_size;
++ /* Reference all but the last batch_size */
++ end_key = 1 + map_size - batch_size;
+ for (key = 1; key < end_key; key++) {
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+ }
+
+- /* Add 1+2*tgt_free to tgt_free*5/2
+- * (+tgt_free/2 keys)
+- */
++ /* Insert batch_size new elements: replaces the non-referenced ones */
+ key = 2 * tgt_free + 1;
+ end_key = key + batch_size;
+ for (; key < end_key; key++) {
+@@ -500,7 +500,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
+ lru_map_fd = create_map(map_type, map_flags,
+ 3 * tgt_free * nr_cpus);
+ else
+- lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
++ lru_map_fd = create_map(map_type, map_flags,
++ 3 * __map_size(tgt_free));
+ assert(lru_map_fd != -1);
+
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,