author    Jakub Kicinski <kuba@kernel.org>  2024-05-02 12:05:13 -0700
committer Jakub Kicinski <kuba@kernel.org>  2024-05-02 12:06:25 -0700
commit    e958da0ddbe831197a0023251880a4a09d5ba268 (patch)
tree      eacded26f9563064a44fd1afe730493898adadb9
parent    dcc61472534e48a200262fd297ab21f8dd94d6cc (diff)
parent    545c494465d24b10a4370545ba213c0916f70b95 (diff)
download  next-queue-e958da0ddbe831197a0023251880a4a09d5ba268.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.

Conflicts:

include/linux/filter.h
kernel/bpf/core.c
  66e13b615a0c ("bpf: verifier: prevent userspace memory access")
  d503a04f8bc0 ("bpf: Add support for certain atomics in bpf_arena to x86 JIT")
https://lore.kernel.org/all/20240429114939.210328b0@canb.auug.org.au/

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
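
As a rough illustration of how a cross-tree merge like this is reproduced and inspected (the remote URL comes from the merge subject above; the working-tree state and the way the two conflicting files are resolved are assumptions, not part of this commit):

    $ git fetch git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
    $ git merge FETCH_HEAD
    # conflicts reported in include/linux/filter.h and kernel/bpf/core.c
    $ git log --oneline -1 66e13b615a0c    # "bpf: verifier: prevent userspace memory access"
    $ git log --oneline -1 d503a04f8bc0    # "bpf: Add support for certain atomics in bpf_arena to x86 JIT"
    # resolve the two files by hand, then record the merge
    $ git add include/linux/filter.h kernel/bpf/core.c
    $ git commit --signoff
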
-rw-r--r--.mailmap1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt3
-rw-r--r--Documentation/core-api/workqueue.rst6
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml1
-rw-r--r--Documentation/rust/arch-support.rst2
-rw-r--r--Documentation/timers/no_hz.rst7
-rw-r--r--Documentation/translations/zh_CN/core-api/workqueue.rst398
-rw-r--r--MAINTAINERS31
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig8
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/boot/Makefile4
-rw-r--r--arch/arc/boot/dts/axc003.dtsi4
-rw-r--r--arch/arc/boot/dts/hsdk.dts1
-rw-r--r--arch/arc/boot/dts/vdk_axs10x_mb.dtsi2
-rw-r--r--arch/arc/include/asm/cachetype.h9
-rw-r--r--arch/arc/include/asm/dsp.h2
-rw-r--r--arch/arc/include/asm/entry-compact.h10
-rw-r--r--arch/arc/include/asm/entry.h4
-rw-r--r--arch/arc/include/asm/irq.h2
-rw-r--r--arch/arc/include/asm/irqflags-compact.h2
-rw-r--r--arch/arc/include/asm/mmu_context.h2
-rw-r--r--arch/arc/include/asm/pgtable-bits-arcv2.h2
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/shmparam.h2
-rw-r--r--arch/arc/include/asm/smp.h4
-rw-r--r--arch/arc/include/asm/thread_info.h2
-rw-r--r--arch/arc/include/uapi/asm/swab.h2
-rw-r--r--arch/arc/kernel/entry-arcv2.S8
-rw-r--r--arch/arc/kernel/entry.S4
-rw-r--r--arch/arc/kernel/head.S2
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/kprobes.c7
-rw-r--r--arch/arc/kernel/perf_event.c2
-rw-r--r--arch/arc/kernel/setup.c2
-rw-r--r--arch/arc/kernel/signal.c7
-rw-r--r--arch/arc/kernel/traps.c2
-rw-r--r--arch/arc/kernel/vmlinux.lds.S4
-rw-r--r--arch/arc/mm/tlb.c4
-rw-r--r--arch/arc/mm/tlbex.S8
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts8
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7g5ek.dts8
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi1
-rw-r--r--arch/arm/net/bpf_jit_32.c56
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712-evb.dts8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712e.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi34
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a.dtsi8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi6
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi36
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/sm6350.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sm6375.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/sm8650.dtsi10
-rw-r--r--arch/arm64/boot/dts/qcom/x1e80100.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi53
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts1
-rw-r--r--arch/arm64/kvm/vgic/vgic-kvm-device.c8
-rw-r--r--arch/arm64/net/bpf_jit_comp.c6
-rw-r--r--arch/loongarch/Kconfig2
-rw-r--r--arch/loongarch/include/asm/crash_reserve.h (renamed from arch/loongarch/include/asm/crash_core.h)4
-rw-r--r--arch/loongarch/include/asm/perf_event.h8
-rw-r--r--arch/loongarch/include/asm/tlb.h2
-rw-r--r--arch/loongarch/kernel/perf_event.c2
-rw-r--r--arch/loongarch/mm/fault.c4
-rw-r--r--arch/riscv/Kconfig.errata8
-rw-r--r--arch/riscv/errata/thead/errata.c24
-rw-r--r--arch/riscv/include/asm/errata_list.h20
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/pgtable.h2
-rw-r--r--arch/riscv/include/uapi/asm/hwprobe.h2
-rw-r--r--arch/riscv/mm/init.c2
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c6
-rw-r--r--arch/x86/Kconfig19
-rw-r--r--arch/x86/include/asm/coco.h1
-rw-r--r--arch/x86/include/asm/pgtable_types.h3
-rw-r--r--arch/x86/kernel/cpu/amd.c3
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/sev-shared.c6
-rw-r--r--arch/x86/net/bpf_jit_comp.c63
-rw-r--r--block/bdev.c2
-rw-r--r--drivers/acpi/cppc_acpi.c57
-rw-r--r--drivers/acpi/x86/s2idle.c8
-rw-r--r--drivers/cxl/core/mbox.c38
-rw-r--r--drivers/dma/idma64.c4
-rw-r--r--drivers/dma/idxd/cdev.c5
-rw-r--r--drivers/dma/idxd/debugfs.c4
-rw-r--r--drivers/dma/idxd/device.c8
-rw-r--r--drivers/dma/idxd/idxd.h2
-rw-r--r--drivers/dma/idxd/init.c2
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/idxd/perfmon.c9
-rw-r--r--drivers/dma/owl-dma.c4
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/dma/tegra186-gpc-dma.c3
-rw-r--r--drivers/dma/xilinx/xdma-regs.h3
-rw-r--r--drivers/dma/xilinx/xdma.c42
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c13
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c137
-rw-r--r--drivers/firmware/qcom/qcom_scm.c37
-rw-r--r--drivers/gpio/gpio-tangier.c9
-rw-r--r--drivers/gpio/gpio-tegra186.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c25
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c24
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h12
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c34
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h9
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c80
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c4
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c9
-rw-r--r--drivers/i2c/i2c-core-base.c12
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c9
-rw-r--r--drivers/md/dm-vdo/murmurhash3.c2
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/misc/eeprom/at24.c18
-rw-r--r--drivers/mmc/host/moxart-mmc.c1
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c1
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c7
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c14
-rw-r--r--drivers/net/vxlan/vxlan_core.c49
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c6
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c9
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c12
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h1
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c36
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c31
-rw-r--r--drivers/phy/ti/phy-tusb1210.c23
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c34
-rw-r--r--drivers/pinctrl/core.c8
-rw-r--r--drivers/pinctrl/devicetree.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c78
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c40
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c6
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c14
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c1
-rw-r--r--drivers/power/supply/mt6360_charger.c2
-rw-r--r--drivers/power/supply/rt9455_charger.c2
-rw-r--r--drivers/regulator/irq_helpers.c3
-rw-r--r--drivers/regulator/mt6360-regulator.c32
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c1
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c1
-rw-r--r--drivers/s390/net/qeth_core_main.c61
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/soc/mediatek/Kconfig1
-rw-r--r--drivers/soc/mediatek/mtk-svs.c7
-rw-r--r--drivers/soundwire/amd_manager.c15
-rw-r--r--drivers/soundwire/amd_manager.h3
-rw-r--r--drivers/vdpa/vdpa.c6
-rw-r--r--drivers/video/fbdev/core/fb_defio.c2
-rw-r--r--fs/9p/v9fs.h11
-rw-r--r--fs/9p/vfs_inode.c37
-rw-r--r--fs/9p/vfs_inode_dotl.c28
-rw-r--r--fs/9p/vfs_super.c2
-rw-r--r--fs/bcachefs/btree_node_scan.c7
-rw-r--r--fs/bcachefs/btree_node_scan_types.h1
-rw-r--r--fs/bcachefs/buckets.c2
-rw-r--r--fs/bcachefs/inode.c2
-rw-r--r--fs/erofs/fscache.c2
-rw-r--r--fs/erofs/internal.h7
-rw-r--r--fs/erofs/super.c124
-rw-r--r--fs/ioctl.c4
-rw-r--r--fs/netfs/buffered_write.c23
-rw-r--r--fs/nfs/inode.c7
-rw-r--r--fs/nfsd/nfs4xdr.c2
-rw-r--r--fs/ntfs3/Kconfig9
-rw-r--r--fs/ntfs3/dir.c7
-rw-r--r--fs/ntfs3/file.c8
-rw-r--r--fs/ntfs3/inode.c20
-rw-r--r--fs/ntfs3/ntfs_fs.h4
-rw-r--r--fs/ntfs3/super.c65
-rw-r--r--fs/proc/page.c7
-rw-r--r--fs/smb/client/cifspdu.h4
-rw-r--r--fs/smb/client/smb2pdu.h2
-rw-r--r--fs/smb/client/transport.c7
-rw-r--r--include/linux/cpu.h11
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/firmware/qcom/qcom_qseecom.h55
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h10
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/page-flags.h144
-rw-r--r--include/linux/profile.h5
-rw-r--r--include/linux/regulator/consumer.h4
-rw-r--r--include/linux/skmsg.h2
-rw-r--r--include/net/gro.h9
-rw-r--r--include/trace/events/mmflags.h1
-rw-r--r--include/uapi/drm/etnaviv_drm.h5
-rw-r--r--include/uapi/linux/vdpa.h6
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/bpf/core.c9
-rw-r--r--kernel/bpf/verifier.c33
-rw-r--r--kernel/cpu.c14
-rw-r--r--kernel/profile.c43
-rw-r--r--kernel/sched/fair.c34
-rw-r--r--kernel/sched/isolation.c18
-rw-r--r--kernel/vmcore_info.c5
-rw-r--r--kernel/workqueue.c19
-rw-r--r--lib/Kconfig.debug5
-rw-r--r--lib/scatterlist.c2
-rw-r--r--lib/stackdepot.c4
-rw-r--r--mm/hugetlb.c40
-rw-r--r--mm/zswap.c25
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/bridge/br_forward.c2
-rw-r--r--net/core/filter.c42
-rw-r--r--net/core/gro.c1
-rw-r--r--net/core/skbuff.c27
-rw-r--r--net/core/skmsg.c5
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/raw.c3
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv4/udp_offload.c15
-rw-r--r--net/ipv6/ip6_offload.c1
-rw-r--r--net/ipv6/udp.c3
-rw-r--r--net/ipv6/udp_offload.c3
-rw-r--r--net/l2tp/l2tp_eth.c3
-rw-r--r--net/mptcp/protocol.c3
-rw-r--r--net/nsh/nsh.c14
-rw-r--r--net/rxrpc/conn_object.c9
-rw-r--r--net/rxrpc/insecure.c2
-rw-r--r--net/rxrpc/rxkad.c2
-rw-r--r--net/rxrpc/txbuf.c10
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--net/tipc/msg.c8
-rw-r--r--rust/Makefile1
-rw-r--r--rust/kernel/init.rs11
-rw-r--r--rust/kernel/lib.rs2
-rw-r--r--rust/kernel/net/phy.rs4
-rw-r--r--rust/macros/lib.rs12
-rw-r--r--rust/macros/module.rs190
-rw-r--r--scripts/Makefile.build2
-rw-r--r--tools/perf/arch/riscv/util/header.c2
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c3
-rw-r--r--tools/testing/selftests/kselftest_harness.h12
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_init.c49
-rw-r--r--tools/testing/selftests/mm/mdwe_test.c1
-rw-r--r--tools/testing/selftests/mm/protection_keys.c38
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh2
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/cbo.c2
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.h10
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_test.c14
306 files changed, 2454 insertions, 1532 deletions
diff --git a/.mailmap b/.mailmap
index 16b704e1d5d36..9f41b3906b663 100644
--- a/.mailmap
+++ b/.mailmap
@@ -512,6 +512,7 @@ Praveen BP <praveenbp@ti.com>
Pradeep Kumar Chitrapu <quic_pradeepc@quicinc.com> <pradeepc@codeaurora.org>
Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
+Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 902ecd92a29fb..213d0719e2b7a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3423,6 +3423,9 @@
arch-independent options, each of which is an
aggregation of existing arch-specific options.
+ Note, "mitigations" is supported if and only if the
+ kernel was built with CPU_MITIGATIONS=y.
+
off
Disable all optional CPU mitigations. This
improves system performance, but it may also
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index ed73c612174d4..bcc370c876be9 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -671,7 +671,7 @@ configuration, worker pools and how workqueues map to the pools: ::
events_unbound unbound 9 9 10 10 8
events_freezable percpu 0 2 4 6
events_power_efficient percpu 0 2 4 6
- events_freezable_power_ percpu 0 2 4 6
+ events_freezable_pwr_ef percpu 0 2 4 6
rcu_gp percpu 0 2 4 6
rcu_par_gp percpu 0 2 4 6
slub_flushwq percpu 0 2 4 6
@@ -694,7 +694,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
events_unbound 38306 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29598 0 0.2 0 0 - -
- events_freezable_power_ 10 0 0.0 0 0 - -
+ events_freezable_pwr_ef 10 0 0.0 0 0 - -
sock_diag_events 0 0 0.0 0 0 - -
total infl CPUtime CPUhog CMW/RPR mayday rescued
@@ -704,7 +704,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
events_unbound 38322 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29603 0 0.2 0 0 - -
- events_freezable_power_ 10 0 0.0 0 0 - -
+ events_freezable_pwr_ef 10 0 0.0 0 0 - -
sock_diag_events 0 0 0.0 0 0 - -
...
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index 1812ef31d5f1e..3c36cd0510de8 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -69,14 +69,10 @@ properties:
- items:
pattern: c32$
- items:
- pattern: c32d-wl$
- - items:
pattern: cs32$
- items:
pattern: c64$
- items:
- pattern: c64d-wl$
- - items:
pattern: cs64$
- items:
pattern: c128$
@@ -136,6 +132,7 @@ properties:
- renesas,r1ex24128
- samsung,s524ad0xd1
- const: atmel,24c128
+ - pattern: '^atmel,24c(32|64)d-wl$' # Actual vendor is st
label:
description: Descriptive name of the EEPROM.
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index d476de82e5c3f..4d5a957fa232e 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -120,7 +120,9 @@ additionalProperties:
slew-rate: true
gpio-hog: true
gpios: true
+ input: true
input-enable: true
+ output-enable: true
output-high: true
output-low: true
line-name: true
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 0b87c266760c6..79798c7474768 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -171,6 +171,7 @@ allOf:
unevaluatedProperties: false
pcie-phy:
+ type: object
description:
Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index 5c4fa9f5d1cdb..c9137710633a3 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -16,7 +16,7 @@ support corresponds to ``S`` values in the ``MAINTAINERS`` file.
Architecture Level of support Constraints
============= ================ ==============================================
``arm64`` Maintained Little Endian only.
-``loongarch`` Maintained -
+``loongarch`` Maintained \-
``um`` Maintained ``x86_64`` only.
``x86`` Maintained ``x86_64`` only.
============= ================ ==============================================
diff --git a/Documentation/timers/no_hz.rst b/Documentation/timers/no_hz.rst
index f8786be15183c..7fe8ef9718d8e 100644
--- a/Documentation/timers/no_hz.rst
+++ b/Documentation/timers/no_hz.rst
@@ -129,11 +129,8 @@ adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain
online to handle timekeeping tasks in order to ensure that system
calls like gettimeofday() returns accurate values on adaptive-tick CPUs.
(This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no running
-user processes to observe slight drifts in clock rate.) Therefore, the
-boot CPU is prohibited from entering adaptive-ticks mode. Specifying a
-"nohz_full=" mask that includes the boot CPU will result in a boot-time
-error message, and the boot CPU will be removed from the mask. Note that
-this means that your system must have at least two CPUs in order for
+user processes to observe slight drifts in clock rate.) Note that this
+means that your system must have at least two CPUs in order for
CONFIG_NO_HZ_FULL=y to do anything for you.
Finally, adaptive-ticks CPUs must have their RCU callbacks offloaded.
diff --git a/Documentation/translations/zh_CN/core-api/workqueue.rst b/Documentation/translations/zh_CN/core-api/workqueue.rst
index 7fac6f75d0782..fe0ff5a127f3f 100644
--- a/Documentation/translations/zh_CN/core-api/workqueue.rst
+++ b/Documentation/translations/zh_CN/core-api/workqueue.rst
@@ -7,12 +7,13 @@
å¸å»¶è…¾ Yanteng Si <siyanteng@loongson.cn>
周彬彬 Binbin Zhou <zhoubinbin@loongson.cn>
+ é™ˆå…´å‹ Xingyou Chen <rockrush@rockwork.org>
.. _cn_workqueue.rst:
-=========================
-并å‘管ç†çš„工作队列 (cmwq)
-=========================
+========
+工作队列
+========
:日期: September, 2010
:作者: Tejun Heo <tj@kernel.org>
@@ -22,7 +23,7 @@
简介
====
-在很多情况下,需è¦ä¸€ä¸ªå¼‚步进程的执行环境,工作队列(wq)API是这ç§æƒ…况下
+在很多情况下,需è¦ä¸€ä¸ªå¼‚步的程åºæ‰§è¡ŒçŽ¯å¢ƒï¼Œå·¥ä½œé˜Ÿåˆ—(wq)API是这ç§æƒ…况下
最常用的机制。
当需è¦è¿™æ ·ä¸€ä¸ªå¼‚步执行上下文时,一个æè¿°å°†è¦æ‰§è¡Œçš„函数的工作项(work,
@@ -34,8 +35,8 @@
队列时,工作者åˆå¼€å§‹æ‰§è¡Œã€‚
-为什么è¦cmwq?
-=============
+为什么è¦æœ‰å¹¶å‘管ç†å·¥ä½œé˜Ÿåˆ—?
+===========================
在最åˆçš„wq实现中,多线程(MT)wq在æ¯ä¸ªCPU上有一个工作者线程,而å•çº¿ç¨‹
(ST)wq在全系统有一个工作者线程。一个MT wq需è¦ä¿æŒä¸ŽCPUæ•°é‡ç›¸åŒçš„å·¥
@@ -73,9 +74,11 @@
å‘该函数的工作项,并在工作队列中排队等待该工作项。(就是挂到workqueue
队列里é¢åŽ»ï¼‰
-特定目的线程,称为工作线程(工作者),一个接一个地执行队列中的功能。
-如果没有工作项排队,工作者线程就会闲置。这些工作者线程被管ç†åœ¨æ‰€è°“
-的工作者池中。
+工作项å¯ä»¥åœ¨çº¿ç¨‹æˆ–BH(软中断)上下文中执行。
+
+对于由线程执行的工作队列,被称为(内核)工作者([k]worker)的特殊
+线程会ä¾æ¬¡æ‰§è¡Œå…¶ä¸­çš„函数。如果没有工作项排队,工作者线程就会闲置。
+这些工作者线程被管ç†åœ¨æ‰€è°“的工作者池中。
cmwq设计区分了é¢å‘用户的工作队列,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºåœ¨ä¸Šé¢æŽ’队工作,
以åŠç®¡ç†å·¥ä½œè€…池和处ç†æŽ’队工作项的åŽç«¯æœºåˆ¶ã€‚
@@ -84,6 +87,10 @@ cmwq设计区分了é¢å‘用户的工作队列,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºåœ¨ä¸Šé
优先级的工作项,还有一些é¢å¤–的工作者池,用于æœåŠ¡æœªç»‘定工作队列的工
作项目——这些åŽå¤‡æ± çš„æ•°é‡æ˜¯åŠ¨æ€çš„。
+BH工作队列使用相åŒçš„结构。然而,由于åŒä¸€æ—¶é—´åªå¯èƒ½æœ‰ä¸€ä¸ªæ‰§è¡Œä¸Šä¸‹æ–‡ï¼Œ
+ä¸éœ€è¦æ‹…心并å‘问题。æ¯ä¸ªCPU上的BH工作者池åªåŒ…å«ä¸€ä¸ªç”¨äºŽè¡¨ç¤ºBH执行
+上下文的虚拟工作者。BH工作队列å¯ä»¥è¢«çœ‹ä½œè½¯ä¸­æ–­çš„便æ·æŽ¥å£ã€‚
+
当他们认为åˆé€‚的时候,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºå¯ä»¥é€šè¿‡ç‰¹æ®Šçš„
``workqueue API`` 函数创建和排队工作项。他们å¯ä»¥é€šè¿‡åœ¨å·¥ä½œé˜Ÿåˆ—上
设置标志æ¥å½±å“工作项执行方å¼çš„æŸäº›æ–¹é¢ï¼Œä»–们把工作项放在那里。这些
@@ -95,9 +102,9 @@ cmwq设计区分了é¢å‘用户的工作队列,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºåœ¨ä¸Šé
å¦åˆ™ä¸€ä¸ªç»‘定的工作队列的工作项将被排在与å‘起线程è¿è¡Œçš„CPU相关的普
通或高级工作工作者池的工作项列表中。
-对于任何工作者池的实施,管ç†å¹¶å‘水平(有多少执行上下文处于活动状
-æ€ï¼‰æ˜¯ä¸€ä¸ªé‡è¦é—®é¢˜ã€‚最低水平是为了节çœèµ„æºï¼Œè€Œé¥±å’Œæ°´å¹³æ˜¯æŒ‡ç³»ç»Ÿè¢«
-充分使用。
+对于任何线程池的实施,管ç†å¹¶å‘水平(有多少执行上下文处于活动状
+æ€ï¼‰æ˜¯ä¸€ä¸ªé‡è¦é—®é¢˜ã€‚cmwq试图将并å‘ä¿æŒåœ¨ä¸€ä¸ªå°½å¯èƒ½ä½Žä¸”充足的
+水平。最低水平是为了节çœèµ„æºï¼Œè€Œå……足是为了使系统能被充分使用。
æ¯ä¸ªä¸Žå®žé™…CPU绑定的worker-pool通过钩ä½è°ƒåº¦å™¨æ¥å®žçŽ°å¹¶å‘管ç†ã€‚æ¯å½“
一个活动的工作者被唤醒或ç¡çœ æ—¶ï¼Œå·¥ä½œè€…池就会得到通知,并跟踪当å‰å¯
@@ -140,6 +147,17 @@ workqueue将自动创建与属性相匹é…çš„åŽå¤‡å·¥ä½œè€…池。调节并å‘æ°
``flags``
---------
+``WQ_BH``
+ BH工作队列å¯ä»¥è¢«çœ‹ä½œè½¯ä¸­æ–­çš„便æ·æŽ¥å£ã€‚它总是æ¯ä¸ªCPU一份,
+ 其中的å„个工作项也会按在队列中的顺åºï¼Œè¢«æ‰€å±žCPU在软中断
+ 上下文中执行。
+
+ BH工作队列的 ``max_active`` 值必须为0,且åªèƒ½å•ç‹¬æˆ–å’Œ
+ ``WQ_HIGHPRI`` 标志组åˆä½¿ç”¨ã€‚
+
+ BH工作项ä¸å¯ä»¥ç¡çœ ã€‚åƒå»¶è¿ŸæŽ’队ã€å†²æ´—ã€å–消等所有其他特性
+ 都是支æŒçš„。
+
``WQ_UNBOUND``
排队到éžç»‘定wq的工作项由特殊的工作者池æä¾›æœåŠ¡ï¼Œè¿™äº›å·¥ä½œè€…ä¸
绑定在任何特定的CPU上。这使得wq表现得åƒä¸€ä¸ªç®€å•çš„执行环境æ
@@ -184,25 +202,21 @@ workqueue将自动创建与属性相匹é…çš„åŽå¤‡å·¥ä½œè€…池。调节并å‘æ°
--------------
``@max_active`` 决定了æ¯ä¸ªCPUå¯ä»¥åˆ†é…ç»™wq的工作项的最大执行上
-下文数é‡ã€‚例如,如果 ``@max_active为16`` ,æ¯ä¸ªCPU最多å¯ä»¥åŒ
-时执行16个wq的工作项。
+下文数é‡ã€‚例如,如果 ``@max_active`` 为16 ,æ¯ä¸ªCPU最多å¯ä»¥åŒ
+时执行16个wq的工作项。它总是æ¯CPU属性,å³ä¾¿å¯¹äºŽæœªç»‘定 wq。
-ç›®å‰ï¼Œå¯¹äºŽä¸€ä¸ªç»‘定的wq, ``@max_active`` 的最大é™åˆ¶æ˜¯512,当指
-定为0时使用的默认值是256。对于éžç»‘定的wq,其é™åˆ¶æ˜¯512å’Œ
-4 * ``num_possible_cpus()`` 中的较高值。这些值被选得足够高,所
-以它们ä¸æ˜¯é™åˆ¶æ€§å› ç´ ï¼ŒåŒæ—¶ä¼šåœ¨å¤±æŽ§æƒ…况下æä¾›ä¿æŠ¤ã€‚
+``@max_active`` 的最大é™åˆ¶æ˜¯512,当指定为0时使用的默认值是256。
+这些值被选得足够高,所以它们ä¸æ˜¯é™åˆ¶æ€§å› ç´ ï¼ŒåŒæ—¶ä¼šåœ¨å¤±æŽ§æƒ…况下æä¾›
+ä¿æŠ¤ã€‚
一个wq的活动工作项的数é‡é€šå¸¸ç”±wq的用户æ¥è°ƒèŠ‚,更具体地说,是由用
户在åŒä¸€æ—¶é—´å¯ä»¥æŽ’列多少个工作项æ¥è°ƒèŠ‚。除éžæœ‰ç‰¹å®šçš„需求æ¥æŽ§åˆ¶æ´»åŠ¨
工作项的数é‡ï¼Œå¦åˆ™å»ºè®®æŒ‡å®š 为"0"。
-一些用户ä¾èµ–于ST wq的严格执行顺åºã€‚ ``@max_active`` 为1å’Œ ``WQ_UNBOUND``
-的组åˆç”¨æ¥å®žçŽ°è¿™ç§è¡Œä¸ºã€‚è¿™ç§wq上的工作项目总是被排到未绑定的工作池
-中,并且在任何时候都åªæœ‰ä¸€ä¸ªå·¥ä½œé¡¹ç›®å¤„于活动状æ€ï¼Œä»Žè€Œå®žçŽ°ä¸ŽST wq相
-åŒçš„排åºå±žæ€§ã€‚
-
-在目å‰çš„实现中,上述é…ç½®åªä¿è¯äº†ç‰¹å®šNUMA节点内的ST行为。相å,
-``alloc_ordered_workqueue()`` 应该被用æ¥å®žçŽ°å…¨ç³»ç»Ÿçš„ST行为。
+一些用户ä¾èµ–于任æ„时刻最多åªæœ‰ä¸€ä¸ªå·¥ä½œé¡¹è¢«æ‰§è¡Œï¼Œä¸”å„工作项被按队列中
+顺åºå¤„ç†å¸¦æ¥çš„严格执行顺åºã€‚``@max_active`` 为1å’Œ ``WQ_UNBOUND``
+的组åˆæ›¾è¢«ç”¨æ¥å®žçŽ°è¿™ç§è¡Œä¸ºï¼ŒçŽ°åœ¨ä¸ç”¨äº†ã€‚请使用
+``alloc_ordered_workqueue()`` 。
执行场景示例
@@ -285,7 +299,7 @@ And with cmwq with ``@max_active`` >= 3, ::
* 除éžæœ‰ç‰¹æ®Šéœ€è¦ï¼Œå»ºè®®ä½¿ç”¨0作为@max_active。在大多数使用情
况下,并å‘水平通常ä¿æŒåœ¨é»˜è®¤é™åˆ¶ä¹‹ä¸‹ã€‚
-* 一个wq作为å‰è¿›è¿›åº¦ä¿è¯ï¼ˆWQ_MEM_RECLAIM,冲洗(flush)和工
+* 一个wq作为å‰è¿›è¿›åº¦ä¿è¯ï¼Œ``WQ_MEM_RECLAIM`` ,冲洗(flush)和工
作项属性的域。ä¸æ¶‰åŠå†…存回收的工作项,ä¸éœ€è¦ä½œä¸ºå·¥ä½œé¡¹ç»„的一
部分被刷新,也ä¸éœ€è¦ä»»ä½•ç‰¹æ®Šå±žæ€§ï¼Œå¯ä»¥ä½¿ç”¨ç³»ç»Ÿä¸­çš„一个wq。使
用专用wq和系统wq在执行特性上没有区别。
@@ -294,6 +308,337 @@ And with cmwq with ``@max_active`` >= 3, ::
益的,因为wqæ“作和工作项执行中的定ä½æ°´å¹³æ高了。
+亲和性作用域
+============
+
+一个éžç»‘定工作队列根æ®å…¶äº²å’Œæ€§ä½œç”¨åŸŸæ¥å¯¹CPU进行分组以æ高缓存
+局部性。比如如果一个工作队列使用默认的“cacheâ€äº²å’Œæ€§ä½œç”¨åŸŸï¼Œ
+它将根æ®æœ€åŽä¸€çº§ç¼“存的边界æ¥åˆ†ç»„处ç†å™¨ã€‚这个工作队列上的工作项
+将被分é…给一个与å‘èµ·CPU共用最åŽçº§ç¼“存的处ç†å™¨ä¸Šçš„工作者。根æ®
+``affinity_strict`` 的设置,工作者在å¯åŠ¨åŽå¯èƒ½è¢«å…许移出
+所在作用域,也å¯èƒ½ä¸è¢«å…许。
+
+工作队列目å‰æ”¯æŒä»¥ä¸‹äº²å’Œæ€§ä½œç”¨åŸŸã€‚
+
+``default``
+ 使用模å—å‚æ•° ``workqueue.default_affinity_scope`` 指定
+ 的作用域,该å‚数总是会被设为以下作用域中的一个。
+
+``cpu``
+ CPUä¸è¢«åˆ†ç»„。一个CPU上å‘起的工作项会被åŒä¸€CPU上的工作者执行。
+ 这使éžç»‘定工作队列表现得åƒæ˜¯ä¸å«å¹¶å‘管ç†çš„æ¯CPU工作队列。
+
+``smt``
+ CPU被按SMT边界分组。这通常æ„味ç€æ¯ä¸ªç‰©ç†CPU核上的å„逻辑CPU会
+ 被分进åŒä¸€ç»„。
+
+``cache``
+ CPU被按缓存边界分组。采用哪个缓存边界由架构代ç å†³å®šã€‚很多情况
+ 下会使用L3。这是默认的亲和性作用域。
+
+``numa``
+ CPU被按NUMA边界分组。
+
+``system``
+ 所有CPU被放在åŒä¸€ç»„。工作队列ä¸å°è¯•åœ¨ä¸´è¿‘å‘èµ·CPUçš„CPU上è¿è¡Œ
+ 工作项。
+
+默认的亲和性作用域å¯ä»¥è¢«æ¨¡å—å‚æ•° ``workqueue.default_affinity_scope``
+修改,特定工作队列的亲和性作用域å¯ä»¥é€šè¿‡ ``apply_workqueue_attrs()``
+被更改。
+
+如果设置了 ``WQ_SYSFS`` ,工作队列会在它的 ``/sys/devices/virtual/workqueue/WQ_NAME/``
+目录中有以下亲和性作用域相关的接å£æ–‡ä»¶ã€‚
+
+``affinity_scope``
+ 读æ“作以查看当å‰çš„亲和性作用域。写æ“作用于更改设置。
+
+ 当å‰ä½œç”¨åŸŸæ˜¯é»˜è®¤å€¼æ—¶ï¼Œå½“å‰ç”Ÿæ•ˆçš„作用域也å¯ä»¥è¢«ä»Žè¿™ä¸ªæ–‡ä»¶ä¸­
+ 读到(å°æ‹¬å·å†…),例如 ``default (cache)`` 。
+
+``affinity_strict``
+ 默认值0表明亲和性作用域ä¸æ˜¯ä¸¥æ ¼çš„。当一个工作项开始执行时,
+ 工作队列尽é‡å°è¯•ä½¿å·¥ä½œè€…处于亲和性作用域内,称为é£è¿”。å¯åŠ¨åŽï¼Œ
+ 调度器å¯ä»¥è‡ªç”±åœ°å°†å·¥ä½œè€…调度到系统中任æ„它认为åˆé€‚的地方去。
+ 这使得在ä¿ç•™ä½¿ç”¨å…¶ä»–CPU(如果必需且有å¯ç”¨ï¼‰èƒ½åŠ›çš„åŒæ—¶ï¼Œ
+ 还能从作用域局部性上获益。
+
+ 如果设置为1,作用域内的所有工作者将被ä¿è¯æ€»æ˜¯å¤„于作用域内。
+ 这在跨亲和性作用域会导致如功耗ã€è´Ÿè½½éš”离等方é¢çš„潜在影å“æ—¶
+ 会有用。严格的NUMA作用域也å¯ç”¨äºŽå’Œæ—§ç‰ˆå†…核中工作队列的行为
+ ä¿æŒä¸€è‡´ã€‚
+
+
+亲和性作用域与性能
+==================
+
+如果éžç»‘定工作队列的行为对ç»å¤§å¤šæ•°ä½¿ç”¨åœºæ™¯æ¥è¯´éƒ½æ˜¯æœ€ä¼˜çš„,
+ä¸éœ€è¦æ›´å¤šè°ƒèŠ‚,就完美了。很ä¸å¹¸ï¼Œåœ¨å½“å‰å†…核中,é‡åº¦ä½¿ç”¨
+工作队列时,需è¦åœ¨å±€éƒ¨æ€§å’Œåˆ©ç”¨çŽ‡é—´æ˜¾å¼åœ°ä½œä¸€ä¸ªæ˜Žæ˜¾çš„æƒè¡¡ã€‚
+
+更高的局部性带æ¥æ›´é«˜æ•ˆçŽ‡ï¼Œä¹Ÿå°±æ˜¯ç›¸åŒæ•°é‡çš„CPU周期内å¯ä»¥åš
+更多工作。然而,如果å‘起者没能将工作项充分地分散在亲和性
+作用域间,更高的局部性也å¯èƒ½å¸¦æ¥æ›´ä½Žçš„整体系统利用率。以下
+dm-crypt 的性能测试清楚地é˜æ˜Žäº†è¿™ä¸€å–èˆã€‚
+
+测试è¿è¡Œåœ¨ä¸€ä¸ª12æ ¸24线程ã€4个L3缓存的处ç†å™¨ï¼ˆAMD Ryzen
+9 3900x)上。为ä¿æŒä¸€è‡´æ€§ï¼Œå…³é—­CPU超频。 ``/dev/dm-0``
+是NVME SSD(三星 990 PRO)上创建,用 ``cryptsetup``
+以默认é…置打开的一个 dm-crypt 设备。
+
+
+场景 1: 机器上é布ç€æœ‰å……足的å‘起者和工作é‡
+------------------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k --ioengine=libaio \
+ --iodepth=64 --runtime=60 --numjobs=24 --time_based --group_reporting \
+ --name=iops-test-job --verify=sha512
+
+这里有24个å‘起者,æ¯ä¸ªåŒæ—¶å‘èµ·64个IO。 ``--verify=sha512``
+使得 ``fio`` æ¯æ¬¡ç”Ÿæˆå’Œè¯»å›žå†…容å—å‘起者和 ``kcryptd``
+间的执行局部性影å“。下é¢æ˜¯åŸºäºŽä¸åŒ ``kcryptd`` 的亲和性
+作用域设置,å„ç»è¿‡äº”次测试得到的读å–带宽和CPU利用率数æ®ã€‚
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 1159.40 ±1.34
+ - 99.31 ±0.02
+
+ * - cache
+ - 1166.40 ±0.89
+ - 99.34 ±0.01
+
+ * - cache (strict)
+ - 1166.00 ±0.71
+ - 99.35 ±0.01
+
+在系统中分布ç€è¶³å¤Ÿå¤šå‘起者的情况下,ä¸è®ºä¸¥æ ¼ä¸Žå¦ï¼Œâ€œcacheâ€
+没有表现得更差。三ç§é…ç½®å‡ä½¿æ•´ä¸ªæœºå™¨è¾¾åˆ°é¥±å’Œï¼Œä½†ç”±äºŽæ高了
+局部性,缓存相关的两ç§æœ‰0.6%的(带宽)æå‡ã€‚
+
+
+场景 2: æ›´å°‘å‘起者,足以达到饱和的工作é‡
+----------------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=8 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+与上一个场景唯一的区别是 ``--numjobs=8``。 å‘起者数é‡
+å‡å°‘为三分之一,但ä»ç„¶æœ‰è¶³ä»¥ä½¿ç³»ç»Ÿè¾¾åˆ°é¥±å’Œçš„工作总é‡ã€‚
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 1155.40 ±0.89
+ - 97.41 ±0.05
+
+ * - cache
+ - 1154.40 ±1.14
+ - 96.15 ±0.09
+
+ * - cache (strict)
+ - 1112.00 ±4.64
+ - 93.26 ±0.35
+
+这里有超过使系统达到饱和所需的工作é‡ã€‚“systemâ€å’Œâ€œcacheâ€
+都接近但并未使机器完全饱和。“cacheâ€æ¶ˆè€—æ›´å°‘çš„CPU但更高的
+效率使其得到和“systemâ€ç›¸åŒçš„带宽。
+
+八个å‘起者盘桓在四个L3缓存作用域间ä»ç„¶å…许“cache (strict)â€
+几乎使机器饱和,但缺少对工作的ä¿æŒï¼ˆä¸ç§»åˆ°ç©ºé—²å¤„ç†å™¨ä¸Šï¼‰
+开始带æ¥3.7%的带宽æŸå¤±ã€‚
+
+
+场景 3: æ›´å°‘å‘起者,ä¸å……足的工作é‡
+----------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=4 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+å†æ¬¡ï¼Œå”¯ä¸€çš„区别是 ``--numjobs=4``。由于å‘起者å‡å°‘到四个,
+现在没有足以使系统饱和的工作é‡ï¼Œå¸¦å®½å˜å¾—ä¾èµ–于完æˆæ—¶å»¶ã€‚
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 993.60 ±1.82
+ - 75.49 ±0.06
+
+ * - cache
+ - 973.40 ±1.52
+ - 74.90 ±0.07
+
+ * - cache (strict)
+ - 828.20 ±4.49
+ - 66.84 ±0.29
+
+现在,局部性和利用率间的æƒè¡¡æ›´æ¸…晰了。“cacheâ€å±•ç¤ºå‡ºç›¸æ¯”
+“systemâ€2%的带宽æŸå¤±ï¼Œè€Œâ€œcache (strict)â€è·Œåˆ°20%。
+
+
+结论和建议
+----------
+
+在以上试验中,虽然一致并且也明显,但“cacheâ€äº²å’Œæ€§ä½œç”¨åŸŸ
+相比“systemâ€çš„性能优势并ä¸å¤§ã€‚然而,这影å“是ä¾èµ–于作用域
+é—´è·ç¦»çš„,在更å¤æ‚的处ç†å™¨æ‹“扑下å¯èƒ½æœ‰æ›´æ˜Žæ˜¾çš„å½±å“。
+
+虽然这些情形下缺少工作ä¿æŒæ˜¯æœ‰å处的,但比“cache (strict)â€
+好多了,而且最大化工作队列利用率的需求也并ä¸å¸¸è§ã€‚因此,
+“cacheâ€æ˜¯éžç»‘定池的默认亲和性作用域。
+
+* 由于ä¸å­˜åœ¨ä¸€ä¸ªé€‚用于大多数场景的选择,对于å¯èƒ½éœ€è¦æ¶ˆè€—
+ 大é‡CPU的工作队列,建议通过 ``apply_workqueue_attrs()``
+ 进行(专门)é…置,并考虑是å¦å¯ç”¨ ``WQ_SYSFS``。
+
+* 设置了严格“cpuâ€äº²å’Œæ€§ä½œç”¨åŸŸçš„éžç»‘定工作队列,它的行为与
+ ``WQ_CPU_INTENSIVE`` æ¯CPU工作队列一样。åŽè€…没有真正
+ 优势,而å‰è€…æ供了大幅度的çµæ´»æ€§ã€‚
+
+* 亲和性作用域是从Linux v6.5起引入的。为了模拟旧版行为,
+ å¯ä»¥ä½¿ç”¨ä¸¥æ ¼çš„“numaâ€äº²å’Œæ€§ä½œç”¨åŸŸã€‚
+
+* ä¸ä¸¥æ ¼çš„亲和性作用域中,缺少工作ä¿æŒå¤§æ¦‚缘于调度器。内核
+ 为什么没能维护好大多数场景下的工作ä¿æŒï¼ŒæŠŠäº‹æƒ…作对,还没有
+ ç†è®ºä¸Šçš„解释。因此,未æ¥è°ƒåº¦å™¨çš„改进å¯èƒ½ä¼šä½¿æˆ‘们ä¸å†éœ€è¦
+ 这些调节项。
+
+
+检查é…ç½®
+========
+
+使用 tools/workqueue/wq_dump.py(drgn脚本) æ¥æ£€æŸ¥æœª
+绑定CPU的亲和性é…置,工作者池,以åŠå·¥ä½œé˜Ÿåˆ—如何映射到池上: ::
+
+ $ tools/workqueue/wq_dump.py
+ Affinity Scopes
+ ===============
+ wq_unbound_cpumask=0000000f
+
+ CPU
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ SMT
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ CACHE (default)
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ NUMA
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ SYSTEM
+ nr_pods 1
+ pod_cpus [0]=0000000f
+ pod_node [0]=-1
+ cpu_pod [0]=0 [1]=0 [2]=0 [3]=0
+
+ Worker Pools
+ ============
+ pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
+ pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
+ pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
+ pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
+ pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
+ pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
+ pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
+ pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
+ pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
+ pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
+ pool[10] ref=28 nice= 0 idle/workers= 17/ 17 cpus=0000000c
+ pool[11] ref= 1 nice=-20 idle/workers= 1/ 1 cpus=0000000f
+ pool[12] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=00000003
+ pool[13] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=0000000c
+
+ Workqueue CPU -> pool
+ =====================
+ [ workqueue \ CPU 0 1 2 3 dfl]
+ events percpu 0 2 4 6
+ events_highpri percpu 1 3 5 7
+ events_long percpu 0 2 4 6
+ events_unbound unbound 9 9 10 10 8
+ events_freezable percpu 0 2 4 6
+ events_power_efficient percpu 0 2 4 6
+ events_freezable_power_ percpu 0 2 4 6
+ rcu_gp percpu 0 2 4 6
+ rcu_par_gp percpu 0 2 4 6
+ slub_flushwq percpu 0 2 4 6
+ netns ordered 8 8 8 8 8
+ ...
+
+å‚è§å‘½ä»¤çš„帮助消æ¯ä»¥èŽ·å–更多信æ¯ã€‚
+
+
+监视
+====
+
+使用 tools/workqueue/wq_monitor.py æ¥ç›‘视工作队列的è¿è¡Œï¼š ::
+
+ $ tools/workqueue/wq_monitor.py events
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
+ events 18545 0 6.1 0 5 - -
+ events_highpri 8 0 0.0 0 0 - -
+ events_long 3 0 0.0 0 0 - -
+ events_unbound 38306 0 0.1 - 7 - -
+ events_freezable 0 0 0.0 0 0 - -
+ events_power_efficient 29598 0 0.2 0 0 - -
+ events_freezable_power_ 10 0 0.0 0 0 - -
+ sock_diag_events 0 0 0.0 0 0 - -
+
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
+ events 18548 0 6.1 0 5 - -
+ events_highpri 8 0 0.0 0 0 - -
+ events_long 3 0 0.0 0 0 - -
+ events_unbound 38322 0 0.1 - 7 - -
+ events_freezable 0 0 0.0 0 0 - -
+ events_power_efficient 29603 0 0.2 0 0 - -
+ events_freezable_power_ 10 0 0.0 0 0 - -
+ sock_diag_events 0 0 0.0 0 0 - -
+
+ ...
+
+å‚è§å‘½ä»¤çš„帮助消æ¯ä»¥èŽ·å–更多信æ¯ã€‚
+
+
调试
====
@@ -330,7 +675,6 @@ And with cmwq with ``@max_active`` >= 3, ::
工作队列ä¿è¯ï¼Œå¦‚果在工作项排队åŽæ»¡è¶³ä»¥ä¸‹æ¡ä»¶ï¼Œåˆ™å·¥ä½œé¡¹ä¸èƒ½é‡å…¥ï¼š
-
1. 工作函数没有被改å˜ã€‚
2. 没有人将该工作项排到å¦ä¸€ä¸ªå·¥ä½œé˜Ÿåˆ—中。
3. 该工作项尚未被é‡æ–°å¯åŠ¨ã€‚
diff --git a/MAINTAINERS b/MAINTAINERS
index 943921d642add..294e472d7de8d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -553,7 +553,7 @@ F: Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
F: drivers/input/misc/adxl34x.c
ADXL355 THREE-AXIS DIGITAL ACCELEROMETER DRIVER
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: linux-iio@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml
@@ -3714,7 +3714,7 @@ F: drivers/iio/imu/bmi323/
BPF JIT for ARM
M: Russell King <linux@armlinux.org.uk>
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/arm/net/
@@ -3764,6 +3764,8 @@ X: arch/riscv/net/bpf_jit_comp64.c
BPF JIT for RISC-V (64-bit)
M: Björn Töpel <bjorn@kernel.org>
+R: Pu Lehui <pulehui@huawei.com>
+R: Puranjay Mohan <puranjay@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/riscv/net/
@@ -4199,7 +4201,6 @@ S: Supported
F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <aelior@marvell.com>
M: Sudarsana Kalluru <skalluru@marvell.com>
M: Manish Chopra <manishc@marvell.com>
L: netdev@vger.kernel.org
@@ -15190,9 +15191,8 @@ F: drivers/scsi/myrb.*
F: drivers/scsi/myrs.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
-S: Supported
+S: Orphan
W: https://www.cspi.com/ethernet-products/support/downloads/
F: drivers/net/ethernet/myricom/myri10ge/
@@ -16829,12 +16829,6 @@ S: Maintained
F: drivers/leds/leds-pca9532.c
F: include/linux/leds-pca9532.h
-PCA9541 I2C BUS MASTER SELECTOR DRIVER
-M: Guenter Roeck <linux@roeck-us.net>
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: drivers/i2c/muxes/i2c-mux-pca9541.c
-
PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
M: Pali Rohár <pali@kernel.org>
@@ -17911,7 +17905,7 @@ F: Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
F: drivers/media/rc/pwm-ir-tx.c
PWM SUBSYSTEM
-M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+M: Uwe Kleine-König <ukleinek@kernel.org>
L: linux-pwm@vger.kernel.org
S: Maintained
Q: https://patchwork.ozlabs.org/project/linux-pwm/list/
@@ -18035,7 +18029,6 @@ S: Supported
F: drivers/scsi/qedi/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Ariel Elior <aelior@marvell.com>
M: Manish Chopra <manishc@marvell.com>
L: netdev@vger.kernel.org
S: Supported
@@ -18045,7 +18038,6 @@ F: include/linux/qed/
QLOGIC QL4xxx RDMA DRIVER
M: Michal Kalderon <mkalderon@marvell.com>
-M: Ariel Elior <aelior@marvell.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qedr/
@@ -20215,7 +20207,6 @@ F: include/linux/platform_data/simplefb.h
SIOX
M: Thorsten Scherer <t.scherer@eckelmann.de>
-M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
S: Supported
F: drivers/gpio/gpio-siox.c
@@ -21965,7 +21956,7 @@ F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
TEXAS INSTRUMENTS' TMP117 TEMPERATURE SENSOR DRIVER
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: linux-iio@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
@@ -24507,6 +24498,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
F: Documentation/admin-guide/LSM/Yama.rst
F: security/yama/
+YAML NETLINK (YNL)
+M: Donald Hunter <donald.hunter@gmail.com>
+M: Jakub Kicinski <kuba@kernel.org>
+F: Documentation/netlink/
+F: Documentation/userspace-api/netlink/intro-specs.rst
+F: Documentation/userspace-api/netlink/specs.rst
+F: tools/net/ynl/
+
YEALINK PHONE DRIVER
M: Henk Vergonet <Henk.Vergonet@gmail.com>
L: usbb2k-api-dev@nongnu.org
diff --git a/Makefile b/Makefile
index 43b10f3d438cf..40fb2ca6fe4c0 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 65afb1de48b36..30f7930275d83 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -9,6 +9,14 @@
#
source "arch/$(SRCARCH)/Kconfig"
+config ARCH_CONFIGURES_CPU_MITIGATIONS
+ bool
+
+if !ARCH_CONFIGURES_CPU_MITIGATIONS
+config CPU_MITIGATIONS
+ def_bool y
+endif
+
menu "General architecture-dependent options"
config ARCH_HAS_SUBPAGE_FAULTS
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 99d2845f3feb9..4092bec198bec 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,7 +6,6 @@
config ARC
def_bool y
select ARC_TIMERS
- select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index 5648748c285f5..5a8550124b73e 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-# uImage build relies on mkimage being availble on your host for ARC target
+# uImage build relies on mkimage being available on your host for ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
-# and make sure it's reacable from your PATH
+# and make sure it's reachable from your PATH
OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 3434c8131ecd5..c0a812674ce9e 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -119,9 +119,9 @@
/*
* The DW APB ICTL intc on MB is connected to CPU intc via a
* DT "invisible" DW APB GPIO block, configured to simply pass thru
- * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
+ * interrupts - setup accordingly in platform init (plat-axs10x/ax10x.c)
*
- * So here we mimic a direct connection betwen them, ignoring the
+ * So here we mimic a direct connection between them, ignoring the
* ABPG GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
* instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
*
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 6691f42550778..41b980df862b1 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -205,7 +205,6 @@
};
gmac: ethernet@8000 {
- #interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 90a412026e643..0e0e2d337bf87 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -113,7 +113,7 @@
/*
* Embedded Vision subsystem UIO mappings; only relevant for EV VDK
*
- * This node is intentionally put outside of MB above becase
+ * This node is intentionally put outside of MB above because
* it maps areas outside of MB's 0xez-0xfz.
*/
uio_ev: uio@d0000000 {
diff --git a/arch/arc/include/asm/cachetype.h b/arch/arc/include/asm/cachetype.h
deleted file mode 100644
index 05fc7ed597125..0000000000000
--- a/arch/arc/include/asm/cachetype.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARC_CACHETYPE_H
-#define __ASM_ARC_CACHETYPE_H
-
-#include <linux/types.h>
-
-#define cpu_dcache_is_aliasing() true
-
-#endif
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
index 202c78e567045..f496dbc4640b2 100644
--- a/arch/arc/include/asm/dsp.h
+++ b/arch/arc/include/asm/dsp.h
@@ -12,7 +12,7 @@
/*
* DSP-related saved registers - need to be saved only when you are
* scheduled out.
- * structure fields name must correspond to aux register defenitions for
+ * structure fields name must correspond to aux register definitions for
* automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
*/
struct dsp_callee_regs {
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 92c3e9f132521..00946fe04c9b2 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -7,7 +7,7 @@
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
+ * its prologue including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -143,7 +143,7 @@
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
+ * That way although L2 IRQ happened in Kernel mode, stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -173,7 +173,7 @@
GET_CURR_TASK_ON_CPU r9
- /* With current tsk in r9, get it's kernel mode stack base */
+ /* With current tsk in r9, get its kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
/* save U mode SP @ pt_regs->sp */
@@ -282,7 +282,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE
@@ -350,7 +350,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE LVL
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index cf1ba376e992c..38c35722cebf0 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -7,7 +7,7 @@
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
-#include <asm/unistd.h> /* For NR_syscalls defination */
+#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
@@ -56,7 +56,7 @@
.endm
/*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c574712ad8658..9cd79263acba8 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -10,7 +10,7 @@
* ARCv2 can support 240 interrupts in the core interrupts controllers and
* 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
* configurations of boards.
- * This doesnt affect ARCompact, but we change it to same value
+ * This doesn't affect ARCompact, but we change it to same value
*/
#define NR_IRQS 512
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 0d63e568d64cb..936a2f21f315e 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -46,7 +46,7 @@
* IRQ Control Macros
*
* All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index dda471f5f05bb..9963bb1a5733f 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index f3eea3f30b2e2..8ebec1b21d246 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -66,7 +66,7 @@
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
- * can be tracked independet of X/W unlike some other CPUs), still to
+ * can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 00b9318e551e7..cf79df0b25705 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -169,7 +169,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
-extern int syscall_trace_entry(struct pt_regs *);
+extern int syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_exit(struct pt_regs *);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
index 8b0251464ffd4..719112af0f41a 100644
--- a/arch/arc/include/asm/shmparam.h
+++ b/arch/arc/include/asm/shmparam.h
@@ -6,7 +6,7 @@
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index e0913f52c2cdd..990f834909f08 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -77,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
- * gaurantted by the platform (not something which core handles).
+ * guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 4c530cf131f33..12daaf3a61eaf 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -38,7 +38,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 02109cd48ee12..8d1f1ef44ba75 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -62,7 +62,7 @@
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
- * (1) It is portable to any architecure
+ * (1) It is portable to any architecture
* (2) At the same time it takes advantage of ARC ISA (rotate intrns)
*/
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 2e49c81c8086b..e238b5fd3c8cf 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -5,7 +5,7 @@
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
*/
-#include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */
+#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
#include <asm/errno.h>
#include <asm/arcregs.h>
@@ -31,7 +31,7 @@ VECTOR res_service ; Reset Vector
VECTOR mem_service ; Mem exception
VECTOR instr_service ; Instrn Error
VECTOR EV_MachineCheck ; Fatal Machine check
-VECTOR EV_TLBMissI ; Intruction TLB miss
+VECTOR EV_TLBMissI ; Instruction TLB miss
VECTOR EV_TLBMissD ; Data TLB miss
VECTOR EV_TLBProtV ; Protection Violation
VECTOR EV_PrivilegeV ; Privilege Violation
@@ -76,11 +76,11 @@ ENTRY(handle_interrupt)
# query in hard ISR path would return false (since .IE is set) which would
# trips genirq interrupt handling asserts.
#
- # So do a "soft" disable of interrutps here.
+ # So do a "soft" disable of interrupts here.
#
# Note this disable is only for consistent book-keeping as further interrupts
# will be disabled anyways even w/o this. Hardware tracks active interrupts
- # seperately in AUX_IRQ_ACT.active and will not take new interrupts
+ # separately in AUX_IRQ_ACT.active and will not take new interrupts
# unless this one returns (or higher prio becomes pending in 2-prio scheme)
IRQ_DISABLE
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 089f6680518f8..3c7e74aba6794 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -95,7 +95,7 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp
- ; MC excpetions disable MMU
+ ; MC exceptions disable MMU
ARC_MMU_REENABLE r3
lsr r3, r10, 8
@@ -209,7 +209,7 @@ trap_with_param:
; ---------------------------------------------
; syscall TRAP
-; ABI: (r0-r7) upto 8 args, (r8) syscall number
+; ABI: (r0-r7) up to 8 args, (r8) syscall number
; ---------------------------------------------
ENTRY(EV_Trap)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 9152782444b55..8d541f53fae3e 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -165,7 +165,7 @@ ENTRY(first_lines_of_secondary)
; setup stack (fp, sp)
mov fp, 0
- ; set it's stack base to tsk->thread_info bottom
+ ; set its stack base to tsk->thread_info bottom
GET_TSK_STACK_BASE r0, sp
j start_kernel_secondary
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 678898757e473..f324f0e3341a3 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -56,7 +56,7 @@ void arc_init_IRQ(void)
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
/*
- * ARCv2 core intc provides multiple interrupt priorities (upto 16).
+ * ARCv2 core intc provides multiple interrupt priorities (up to 16).
* Typical builds though have only two levels (0-high, 1-low)
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
* NMI style interrupts in future (say perf)
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index e71d64119d71a..f8e2960832d94 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -190,7 +190,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
}
}
-int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+static int
+__kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
@@ -241,8 +242,8 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
return 0;
}
-static int __kprobes arc_post_kprobe_handler(unsigned long addr,
- struct pt_regs *regs)
+static int
+__kprobes arc_post_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index adff957962da8..6e5a651cd75cf 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -38,7 +38,7 @@
* (based on a specific RTL build)
* Below is the static map between perf generic/arc specific event_id and
* h/w condition names.
- * At the time of probe, we loop thru each index and find it's name to
+ * At the time of probe, we loop thru each index and find its name to
* complete the mapping of perf event_id to h/w index as latter is needed
* to program the counter really
*/
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index d08a5092c2b4d..7b6a9beba9db6 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -390,7 +390,7 @@ static void arc_chk_core_config(struct cpuinfo_arc *info)
#ifdef CONFIG_ARC_HAS_DCCM
/*
* DCCM can be arbit placed in hardware.
- * Make sure it's placement/sz matches what Linux is built with
+ * Make sure its placement/sz matches what Linux is built with
*/
if ((unsigned int)__arc_dccm_base != info->dccm.base)
panic("Linux built with incorrect DCCM Base address\n");
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 8f6f4a5429646..fefa705a86385 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -8,15 +8,16 @@
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal() supports TIF_RESTORE_SIGMASK
- * -do_signal() no loner needs oldset, required by OLD sys_sigsuspend
- * -sys_rt_sigsuspend() now comes from generic code, so discard arch implemen
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard arch
+ * implementation
* -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
* -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
* the job to do_signal()
*
* vineetg: July 2009
* -Modified Code to support the uClibc provided userland sigreturn stub
- * to avoid kernel synthesing it on user stack at runtime, costing TLB
+ * to avoid kernel synthesizing it on user stack at runtime, costing TLB
* probes and Cache line flushes.
*
* vineetg: July 2009
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 9b9570b79362e..a19751e824fb4 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -89,7 +89,7 @@ int do_misaligned_access(unsigned long address, struct pt_regs *regs,
/*
* Entry point for miscll errors such as Nested Exceptions
- * -Duplicate TLB entry is handled seperately though
+ * -Duplicate TLB entry is handled separately though
*/
void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
{
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 549c3f4079186..61a1b2b96e1d8 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -41,8 +41,8 @@ SECTIONS
#endif
/*
- * The reason for having a seperate subsection .init.ramfs is to
- * prevent objump from including it in kernel dumps
+ * The reason for having a separate subsection .init.ramfs is to
+ * prevent objdump from including it in kernel dumps
*
* Reason for having .init.ramfs above .init is to make sure that the
* binary blob is tucked away to one side, reducing the displacement
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ad702b49aeb3b..cae4a7aae0ed4 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -212,7 +212,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long flags;
/* If range @start to @end is more than 32 TLB entries deep,
- * its better to move to a new ASID rather than searching for
+ * it's better to move to a new ASID rather than searching for
* individual entries and then shooting them down
*
* The calc above is rough, doesn't account for unaligned parts,
@@ -408,7 +408,7 @@ static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *p
* -More importantly it makes this handler inconsistent with fast-path
* TLB Refill handler which always deals with "current"
*
- * Lets see the use cases when current->mm != vma->mm and we land here
+ * Let's see the use cases when current->mm != vma->mm and we land here
* 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
* Here VM wants to pre-install a TLB entry for user stack while
* current->mm still points to pre-execve mm (hence the condition).
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index e054780a8fe0c..dc65e87a531fd 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -5,19 +5,19 @@
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Vineetg: April 2011 :
- * -MMU v1: moved out legacy code into a seperate file
+ * -MMU v1: moved out legacy code into a separate file
* -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
* helps avoid a shift when preparing PD0 from PTE
*
* Vineetg: July 2009
- * -For MMU V2, we need not do heuristics at the time of commiting a D-TLB
- * entry, so that it doesn't knock out it's I-TLB entry
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ * entry, so that it doesn't knock out its I-TLB entry
* -Some more fine tuning:
* bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
*
* Vineetg: July 2009
* -Practically rewrote the I/D TLB Miss handlers
- * Now 40 and 135 instructions a peice as compared to 131 and 449 resp.
+ * Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
* Hence Leaner by 1.5 K
* Used Conditional arithmetic to replace excessive branching
* Also used short instructions wherever possible
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
index 4f609e9e510ef..009d2c8324210 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g54_curiosity.dts
@@ -242,7 +242,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1150000>;
+ regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@@ -263,7 +263,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1050000>;
+ regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@@ -280,7 +280,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@@ -296,7 +296,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <3300000>;
+ regulator-suspend-microvolt = <3300000>;
regulator-on-in-suspend;
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
index 217e9b96c61e5..20b2497657ae4 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
@@ -293,7 +293,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1150000>;
+ regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@@ -314,7 +314,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1050000>;
+ regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@@ -331,7 +331,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@@ -346,7 +346,7 @@
regulator-max-microvolt = <3700000>;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
index 3fdece5bd31f9..5248a058230c8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
@@ -805,6 +805,7 @@
&pinctrl_usb_pwr>;
dr_mode = "host";
power-active-high;
+ over-current-active-low;
disable-over-current;
status = "okay";
};
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 01516f83a95ac..deeb8f292454b 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -871,16 +871,11 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
}
/* dst = src (4 bytes)*/
-static inline void emit_a32_mov_r(const s8 dst, const s8 src, const u8 off,
- struct jit_ctx *ctx) {
+static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) {
const s8 *tmp = bpf2a32[TMP_REG_1];
s8 rt;
rt = arm_bpf_get_reg32(src, tmp[0], ctx);
- if (off && off != 32) {
- emit(ARM_LSL_I(rt, rt, 32 - off), ctx);
- emit(ARM_ASR_I(rt, rt, 32 - off), ctx);
- }
arm_bpf_put_reg32(dst, rt, ctx);
}
@@ -889,15 +884,15 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
const s8 src[],
struct jit_ctx *ctx) {
if (!is64) {
- emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
if (!ctx->prog->aux->verifier_zext)
/* Zero out high 4 bytes */
emit_a32_mov_i(dst_hi, 0, ctx);
} else if (__LINUX_ARM_ARCH__ < 6 &&
ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
/* complete 8 byte move */
- emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
- emit_a32_mov_r(dst_hi, src_hi, 0, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
+ emit_a32_mov_r(dst_hi, src_hi, ctx);
} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
const u8 *tmp = bpf2a32[TMP_REG_1];
@@ -917,17 +912,52 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
struct jit_ctx *ctx) {
const s8 *tmp = bpf2a32[TMP_REG_1];
- const s8 *rt;
+ s8 rs;
+ s8 rd;
- rt = arm_bpf_get_reg64(dst, tmp, ctx);
+ if (is_stacked(dst_lo))
+ rd = tmp[1];
+ else
+ rd = dst_lo;
+ rs = arm_bpf_get_reg32(src_lo, rd, ctx);
+ /* rs may be one of src[1], dst[1], or tmp[1] */
+
+ /* Sign extend rs if needed. If off == 32, lower 32-bits of src are moved to dst and sign
+ * extension only happens in the upper 64 bits.
+ */
+ if (off != 32) {
+ /* Sign extend rs into rd */
+ emit(ARM_LSL_I(rd, rs, 32 - off), ctx);
+ emit(ARM_ASR_I(rd, rd, 32 - off), ctx);
+ } else {
+ rd = rs;
+ }
+
+ /* Write rd to dst_lo
+ *
+ * Optimization:
+ * Assume:
+ * 1. dst == src and stacked.
+ * 2. off == 32
+ *
+ * In this case src_lo was loaded into rd(tmp[1]) but rd was not sign extended as off==32.
+ * So, we don't need to write rd back to dst_lo as they have the same value.
+ * This saves us one str instruction.
+ */
+ if (dst_lo != src_lo || off != 32)
+ arm_bpf_put_reg32(dst_lo, rd, ctx);
- emit_a32_mov_r(dst_lo, src_lo, off, ctx);
if (!is64) {
if (!ctx->prog->aux->verifier_zext)
/* Zero out high 4 bytes */
emit_a32_mov_i(dst_hi, 0, ctx);
} else {
- emit(ARM_ASR_I(rt[0], rt[1], 31), ctx);
+ if (is_stacked(dst_hi)) {
+ emit(ARM_ASR_I(tmp[0], rd, 31), ctx);
+ arm_bpf_put_reg32(dst_hi, tmp[0], ctx);
+ } else {
+ emit(ARM_ASR_I(dst_hi, rd, 31), ctx);
+ }
}
}
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index bfc5c81a5bd4e..8141926e4ef14 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -1672,7 +1672,7 @@
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT>,
<&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
clock-names = "pclk", "wrap", "phy", "axi";
- assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM1_PIX>,
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_CAM2_PIX>,
<&clk IMX8MP_CLK_MEDIA_MIPI_PHY1_REF>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
<&clk IMX8MP_CLK_24M>;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 0c38f7b517637..234e3b23d7a8d 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -129,7 +129,7 @@
};
&pio {
- eth_default: eth_default {
+ eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
@@ -156,7 +156,7 @@
};
};
- eth_sleep: eth_sleep {
+ eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
@@ -182,14 +182,14 @@
};
};
- usb0_id_pins_float: usb0_iddig {
+ usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
- usb1_id_pins_float: usb1_iddig {
+ usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 6d218caa198cf..082672efba0a3 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -249,10 +249,11 @@
#clock-cells = <1>;
};
- infracfg: syscon@10001000 {
+ infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
pericfg: syscon@10003000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 3ee9266fa8e98..917fa39a74f8d 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -252,7 +252,7 @@
clock-names = "hif_sel";
};
- cir: cir@10009000 {
+ cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@@ -283,16 +283,14 @@
};
};
- apmixedsys: apmixedsys@10209000 {
- compatible = "mediatek,mt7622-apmixedsys",
- "syscon";
+ apmixedsys: clock-controller@10209000 {
+ compatible = "mediatek,mt7622-apmixedsys";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
- topckgen: topckgen@10210000 {
- compatible = "mediatek,mt7622-topckgen",
- "syscon";
+ topckgen: clock-controller@10210000 {
+ compatible = "mediatek,mt7622-topckgen";
reg = <0 0x10210000 0 0x1000>;
#clock-cells = <1>;
};
@@ -515,7 +513,6 @@
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
- reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@@ -734,9 +731,8 @@
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
};
- ssusbsys: ssusbsys@1a000000 {
- compatible = "mediatek,mt7622-ssusbsys",
- "syscon";
+ ssusbsys: clock-controller@1a000000 {
+ compatible = "mediatek,mt7622-ssusbsys";
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -793,9 +789,8 @@
};
};
- pciesys: pciesys@1a100800 {
- compatible = "mediatek,mt7622-pciesys",
- "syscon";
+ pciesys: clock-controller@1a100800 {
+ compatible = "mediatek,mt7622-pciesys";
reg = <0 0x1a100800 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -921,12 +916,13 @@
};
};
- hifsys: syscon@1af00000 {
- compatible = "mediatek,mt7622-hifsys", "syscon";
+ hifsys: clock-controller@1af00000 {
+ compatible = "mediatek,mt7622-hifsys";
reg = <0 0x1af00000 0 0x70>;
+ #clock-cells = <1>;
};
- ethsys: syscon@1b000000 {
+ ethsys: clock-controller@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
reg = <0 0x1b000000 0 0x1000>;
@@ -966,9 +962,7 @@
};
eth: ethernet@1b100000 {
- compatible = "mediatek,mt7622-eth",
- "mediatek,mt2701-eth",
- "syscon";
+ compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
index e04b1c0c0ebbf..ed79ad1ae8716 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
@@ -146,19 +146,19 @@
&cpu_thermal {
cooling-maps {
- cpu-active-high {
+ map-cpu-active-high {
/* active: set fan to cooling level 2 */
cooling-device = <&fan 2 2>;
trip = <&cpu_trip_active_high>;
};
- cpu-active-med {
+ map-cpu-active-med {
/* active: set fan to cooling level 1 */
cooling-device = <&fan 1 1>;
trip = <&cpu_trip_active_med>;
};
- cpu-active-low {
+ map-cpu-active-low {
/* active: set fan to cooling level 0 */
cooling-device = <&fan 0 0>;
trip = <&cpu_trip_active_low>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index b3f416b9a7a4d..559990dcd1d17 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -332,9 +332,8 @@
reg = <0 0x1100c800 0 0x800>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&infracfg CLK_INFRA_THERM_CK>,
- <&infracfg CLK_INFRA_ADC_26M_CK>,
- <&infracfg CLK_INFRA_ADC_FRC_CK>;
- clock-names = "therm", "auxadc", "adc_32k";
+ <&infracfg CLK_INFRA_ADC_26M_CK>;
+ clock-names = "therm", "auxadc";
nvmem-cells = <&thermal_calibration>;
nvmem-cell-names = "calibration-data";
#thermal-sensor-cells = <1>;
@@ -492,8 +491,6 @@
compatible = "mediatek,mt7986-ethsys",
"syscon";
reg = <0 0x15000000 0 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
#clock-cells = <1>;
#reset-cells = <1>;
};
@@ -556,7 +553,6 @@
<&topckgen CLK_TOP_SGM_325M_SEL>;
assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
<&apmixedsys CLK_APMIXED_SGMPLL>;
- #reset-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
mediatek,ethsys = <&ethsys>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 6bd7424ef66c5..100191c6453ba 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -433,7 +433,6 @@
};
&mt6358_vgpu_reg {
- regulator-min-microvolt = <625000>;
regulator-max-microvolt = <900000>;
regulator-coupled-with = <&mt6358_vsram_gpu_reg>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 93dfbf1302315..774ae5d9143f1 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1637,6 +1637,7 @@
compatible = "mediatek,mt8183-mfgcfg", "syscon";
reg = <0 0x13000000 0 0x1000>;
#clock-cells = <1>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_MFG_ASYNC>;
};
gpu: gpu@13040000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
index 3dea28f1d8061..1807e9d6cb0e4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi
@@ -1296,7 +1296,7 @@
* regulator coupling requirements.
*/
regulator-name = "ppvar_dvdd_vgpu";
- regulator-min-microvolt = <600000>;
+ regulator-min-microvolt = <500000>;
regulator-max-microvolt = <950000>;
regulator-ramp-delay = <6250>;
regulator-enable-ramp-delay = <200>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index 9b738f6a5d213..7a704246678f0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -1421,7 +1421,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -1431,7 +1431,7 @@
mt6315_6_vbuck3: vbuck3 {
regulator-compatible = "vbuck3";
regulator-name = "Vlcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -1448,7 +1448,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <606250>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <800000>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
index 05e401670bced..84cbdf6e9eb0c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
@@ -1464,6 +1464,7 @@
reg = <0 0x14001000 0 0x1000>;
interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&mmsys CLK_MM_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
<CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
power-domains = <&spm MT8192_POWER_DOMAIN_DISP>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index f94c07f8b9334..4a11918da3704 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -264,6 +264,38 @@
status = "okay";
};
+&cpu0 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu1 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu2 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu3 {
+ cpu-supply = <&mt6359_vcore_buck_reg>;
+};
+
+&cpu4 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu5 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu6 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
+&cpu7 {
+ cpu-supply = <&mt6315_6_vbuck1>;
+};
+
&dp_intf0 {
status = "okay";
@@ -1214,7 +1246,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
@@ -1232,7 +1264,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <625000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index ea6dc220e1cce..5d8b68f86ce44 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -2028,6 +2028,7 @@
compatible = "mediatek,mt8195-vppsys0", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0 0x1000>;
};
dma-controller@14001000 {
@@ -2251,6 +2252,7 @@
compatible = "mediatek,mt8195-vppsys1", "syscon";
reg = <0 0x14f00000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0 0x1000>;
};
mutex@14f01000 {
@@ -3080,6 +3082,7 @@
reg = <0 0x1c01a000 0 0x1000>;
mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
};
@@ -3261,6 +3264,7 @@
interrupts = <GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x6000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
};
@@ -3331,6 +3335,7 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
clocks = <&vdosys1 CLK_VDO1_DISP_MUTEX>;
clock-names = "vdo1_mutex";
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 7e7f0f0fb41ba..41f51d3261110 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -3707,7 +3707,7 @@
compatible = "qcom,sc7280-adsp-pas";
reg = <0 0x03700000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
<&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3944,7 +3944,7 @@
compatible = "qcom,sc7280-cdsp-pas";
reg = <0 0x0a300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
<&cdsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 32afc78d5b769..053f7861c3cec 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -2701,7 +2701,7 @@
resets = <&gcc GCC_USB30_SEC_BCR>;
power-domains = <&gcc USB30_SEC_GDSC>;
interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
- <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 40 IRQ_TYPE_LEVEL_HIGH>,
<&pdc 10 IRQ_TYPE_EDGE_BOTH>,
<&pdc 11 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index a5b194813079e..d0f82e12289e1 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -1774,6 +1774,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_4_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie4_phy>;
phy-names = "pciephy";
@@ -1872,6 +1873,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3b_phy>;
phy-names = "pciephy";
@@ -1970,6 +1972,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_3A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie3a_phy>;
phy-names = "pciephy";
@@ -2071,6 +2074,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2b_phy>;
phy-names = "pciephy";
@@ -2169,6 +2173,7 @@
reset-names = "pci";
power-domains = <&gcc PCIE_2A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie2a_phy>;
phy-names = "pciephy";
@@ -2641,7 +2646,7 @@
compatible = "qcom,sc8280xp-adsp-pas";
reg = <0 0x03000000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -4977,7 +4982,7 @@
compatible = "qcom,sc8280xp-nsp0-pas";
reg = <0 0x1b300000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp0_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -5108,7 +5113,7 @@
compatible = "qcom,sc8280xp-nsp1-pas";
reg = <0 0x21300000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_nsp1_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
index 24bcec3366efd..0be053555602c 100644
--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
@@ -1252,7 +1252,7 @@
compatible = "qcom,sm6350-adsp-pas";
reg = <0 0x03000000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -1511,7 +1511,7 @@
compatible = "qcom,sm6350-cdsp-pas";
reg = <0 0x08300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
index 4386f8a9c636c..f40509d91bbda 100644
--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
@@ -1561,7 +1561,7 @@
compatible = "qcom,sm6375-adsp-pas";
reg = <0 0x0a400000 0 0x100>;
- interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 39bd8f0eba1e6..7f2333c9d17d6 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -3062,7 +3062,7 @@
compatible = "qcom,sm8250-slpi-pas";
reg = <0 0x05c00000 0 0x4000>;
- interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 9 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_slpi_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -3766,7 +3766,7 @@
compatible = "qcom,sm8250-cdsp-pas";
reg = <0 0x08300000 0 0x10000>;
- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
@@ -5928,7 +5928,7 @@
compatible = "qcom,sm8250-adsp-pas";
reg = <0 0x17300000 0 0x100>;
- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
<&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index b86be34a912b9..024d2653cc307 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -1777,12 +1777,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
<0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
- /*
- * MSIs for BDF (1:0.0) only works with Device ID 0x5980.
- * Hence, the IDs are swapped.
- */
- msi-map = <0x0 &gic_its 0x5981 0x1>,
- <0x100 &gic_its 0x5980 0x1>;
+ msi-map = <0x0 &gic_its 0x5980 0x1>,
+ <0x100 &gic_its 0x5981 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
@@ -1900,12 +1896,8 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
- /*
- * MSIs for BDF (1:0.0) only works with Device ID 0x5a00.
- * Hence, the IDs are swapped.
- */
- msi-map = <0x0 &gic_its 0x5a01 0x1>,
- <0x100 &gic_its 0x5a00 0x1>;
+ msi-map = <0x0 &gic_its 0x5a00 0x1>,
+ <0x100 &gic_its 0x5a01 0x1>;
msi-map-mask = <0xff00>;
interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index 3904348075f67..3348bc06db488 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -1755,9 +1755,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_0 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1401 0x1>,
- <0x100 &gic_its 0x1400 0x1>;
+ msi-map = <0x0 &gic_its 0x1400 0x1>,
+ <0x100 &gic_its 0x1401 0x1>;
iommu-map = <0x0 &apps_smmu 0x1400 0x1>,
<0x100 &apps_smmu 0x1401 0x1>;
@@ -1867,9 +1866,8 @@
<&gem_noc MASTER_APPSS_PROC 0 &cnoc_main SLAVE_PCIE_1 0>;
interconnect-names = "pcie-mem", "cpu-pcie";
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1481 0x1>,
- <0x100 &gic_its 0x1480 0x1>;
+ msi-map = <0x0 &gic_its 0x1480 0x1>,
+ <0x100 &gic_its 0x1481 0x1>;
iommu-map = <0x0 &apps_smmu 0x1480 0x1>,
<0x100 &apps_smmu 0x1481 0x1>;
diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
index ba72d8f384207..eb117866e59ff 100644
--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
@@ -2274,9 +2274,8 @@
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1401 0x1>,
- <0x100 &gic_its 0x1400 0x1>;
+ msi-map = <0x0 &gic_its 0x1400 0x1>,
+ <0x100 &gic_its 0x1401 0x1>;
msi-map-mask = <0xff00>;
linux,pci-domain = <0>;
@@ -2402,9 +2401,8 @@
interrupt-map-mask = <0 0 0 0x7>;
#interrupt-cells = <1>;
- /* Entries are reversed due to the unusual ITS DeviceID encoding */
- msi-map = <0x0 &gic_its 0x1481 0x1>,
- <0x100 &gic_its 0x1480 0x1>;
+ msi-map = <0x0 &gic_its 0x1480 0x1>,
+ <0x100 &gic_its 0x1481 0x1>;
msi-map-mask = <0xff00>;
linux,pci-domain = <1>;
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 8e517f76189e1..6b40082bac68c 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -284,7 +284,7 @@
domain-idle-states {
CLUSTER_CL4: cluster-sleep-0 {
- compatible = "arm,idle-state";
+ compatible = "domain-idle-state";
idle-state-name = "l2-ret";
arm,psci-suspend-param = <0x01000044>;
entry-latency-us = <350>;
@@ -293,7 +293,7 @@
};
CLUSTER_CL5: cluster-sleep-1 {
- compatible = "arm,idle-state";
+ compatible = "domain-idle-state";
idle-state-name = "ret-pll-off";
arm,psci-suspend-param = <0x01000054>;
entry-latency-us = <2200>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index 5846a11f0e848..d5e035823eb5e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -663,7 +663,7 @@ camera: &i2c7 {
port@1 {
reg = <1>;
- mipi1_in_panel: endpoint@1 {
+ mipi1_in_panel: endpoint {
remote-endpoint = <&mipi1_out_panel>;
};
};
@@ -689,7 +689,6 @@ camera: &i2c7 {
ep-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
/* PERST# asserted in S3 */
- pcie-reset-suspend = <1>;
vpcie3v3-supply = <&wlan_3v3>;
vpcie1v8-supply = <&pp1800_pcie>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index dfb2a0bdea5b7..9586bb12a5d8f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -611,7 +611,7 @@
#size-cells = <0>;
interface@0 { /* interface 0 of configuration 1 */
- compatible = "usbbda,8156.config1.0";
+ compatible = "usbifbda,8156.config1.0";
reg = <0 1>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 054c6a4d1a45f..294eb2de263de 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -779,7 +779,6 @@
};
&pcie0 {
- bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 2c3984a880af6..f6f15946579eb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -194,6 +194,8 @@
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
+ vpcie3v3-supply = <&vcc3v3_baseboard>;
+ vpcie12v-supply = <&dc_12v>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index c08e69391c015..ccbe3a7a1d2c2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -79,6 +79,26 @@
regulator-max-microvolt = <5000000>;
};
+ vcca_0v9: vcca-0v9-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc_1v8>;
+ };
+
+ vcca_1v8: vcca-1v8-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
vdd_log: vdd-log {
compatible = "pwm-regulator";
pwms = <&pwm2 0 25000 1>;
@@ -416,16 +436,28 @@
gpio1830-supply = <&vcc_1v8>;
};
-&pmu_io_domains {
- status = "okay";
- pmu1830-supply = <&vcc_1v8>;
+&pcie0 {
+ /* PCIe PHY supplies */
+ vpcie0v9-supply = <&vcca_0v9>;
+ vpcie1v8-supply = <&vcca_1v8>;
};
-&pwm2 {
- status = "okay";
+&pcie_clkreqn_cpm {
+ rockchip,pins =
+ <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&q7_thermal_pin>;
+
+ gpios {
+ q7_thermal_pin: q7-thermal-pin {
+ rockchip,pins =
+ <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@@ -458,11 +490,20 @@
usb3 {
usb3_id: usb3-id {
rockchip,pins =
- <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
+&pmu_io_domains {
+ status = "okay";
+ pmu1830-supply = <&vcc_1v8>;
+};
+
+&pwm2 {
+ status = "okay";
+};
+
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
index 6ecdf5d283390..c1194d1e438d0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
@@ -447,7 +447,6 @@
&pcie2x1 {
reset-gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_HIGH>;
- disable-gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_pcie>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index 7b5f3904ef610..c87fad2c34cba 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -416,6 +416,8 @@
vccio_sd: LDO_REG5 {
regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -525,9 +527,9 @@
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7531";
- reg = <0>;
+ reg = <0x1f>;
ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
index a8a4cc190eb32..a3112d5df2008 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
@@ -523,7 +523,6 @@
&pcie2x1 {
reset-gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
- disable-gpios = <&gpio3 RK_PC2 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_mini_pcie>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
index cce1c8e835877..94ecb9b4f98f8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
@@ -216,9 +216,9 @@
pinctrl-0 = <&i2c7m0_xfer>;
status = "okay";
- es8316: audio-codec@11 {
+ es8316: audio-codec@10 {
compatible = "everest,es8316";
- reg = <0x11>;
+ reg = <0x10>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clock-rates = <12288000>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
index 1b606ea5b6cf2..1a604429fb266 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
@@ -485,6 +485,7 @@
pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
@@ -506,7 +507,7 @@
#gpio-cells = <2>;
rk806_dvs1_null: dvs1-null-pins {
- pins = "gpio_pwrctrl2";
+ pins = "gpio_pwrctrl1";
function = "pin_fun0";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
index 67414d72e2b6e..22bbfbe729c11 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
@@ -456,6 +456,7 @@
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
pinctrl-names = "default";
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc4v0_sys>;
vcc2-supply = <&vcc4v0_sys>;
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index f48b8dab8b3d2..1d26bb5b02f4b 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -338,12 +338,12 @@ int kvm_register_vgic_device(unsigned long type)
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
struct vgic_reg_attr *reg_attr)
{
- int cpuid;
+ int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
- cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
-
- reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
+ if (!reg_attr->vcpu)
+ return -EINVAL;
return 0;
}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 76b91f36c729c..cf9a9ebb4cda9 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1905,15 +1905,15 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
emit_call(enter_prog, ctx);
+ /* save return value to callee saved register x20 */
+ emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
+
/* if (__bpf_prog_enter(prog) == 0)
* goto skip_exec_of_prog;
*/
branch = ctx->image + ctx->idx;
emit(A64_NOP, ctx);
- /* save return value to callee saved register x20 */
- emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
-
emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
if (!p->jited)
emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index a5f300ec6f280..54ad04dacdee9 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -595,7 +595,7 @@ config ARCH_SELECTS_CRASH_DUMP
select RELOCATABLE
config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
- def_bool CRASH_CORE
+ def_bool CRASH_RESERVE
config RELOCATABLE
bool "Relocatable kernel"
diff --git a/arch/loongarch/include/asm/crash_core.h b/arch/loongarch/include/asm/crash_reserve.h
index 218bdbfa527ba..a1d9b84b1c7d5 100644
--- a/arch/loongarch/include/asm/crash_core.h
+++ b/arch/loongarch/include/asm/crash_reserve.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LOONGARCH_CRASH_CORE_H
-#define _LOONGARCH_CRASH_CORE_H
+#ifndef _LOONGARCH_CRASH_RESERVE_H
+#define _LOONGARCH_CRASH_RESERVE_H
#define CRASH_ALIGN SZ_2M
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
index 2a35a0bc2aaab..52b638059e40b 100644
--- a/arch/loongarch/include/asm/perf_event.h
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -7,6 +7,14 @@
#ifndef __LOONGARCH_PERF_EVENT_H__
#define __LOONGARCH_PERF_EVENT_H__
+#include <asm/ptrace.h>
+
#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->csr_era = (__ip); \
+ (regs)->regs[3] = current_stack_pointer; \
+ (regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
+}
+
#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/tlb.h b/arch/loongarch/include/asm/tlb.h
index da7a3b5b9374a..e071f5e9e8580 100644
--- a/arch/loongarch/include/asm/tlb.h
+++ b/arch/loongarch/include/asm/tlb.h
@@ -132,8 +132,6 @@ static __always_inline void invtlb_all(u32 op, u32 info, u64 addr)
);
}
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
static void tlb_flush(struct mmu_gather *tlb);
#define tlb_flush tlb_flush
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
index 0491bf453cd49..cac7cba81b65f 100644
--- a/arch/loongarch/kernel/perf_event.c
+++ b/arch/loongarch/kernel/perf_event.c
@@ -884,4 +884,4 @@ static int __init init_hw_perf_events(void)
return 0;
}
-early_initcall(init_hw_perf_events);
+pure_initcall(init_hw_perf_events);
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 1fc2f6813ea02..97b40defde060 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -202,10 +202,10 @@ good_area:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
- if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
- goto bad_area;
if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
goto bad_area;
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
+ goto bad_area;
}
/*
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 910ba8837add8..2acc7d876e1fb 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -82,14 +82,14 @@ config ERRATA_THEAD
Otherwise, please say "N" here to avoid unnecessary overhead.
-config ERRATA_THEAD_PBMT
- bool "Apply T-Head memory type errata"
+config ERRATA_THEAD_MAE
+ bool "Apply T-Head's memory attribute extension (XTheadMae) errata"
depends on ERRATA_THEAD && 64BIT && MMU
select RISCV_ALTERNATIVE_EARLY
default y
help
- This will apply the memory type errata to handle the non-standard
- memory type bits in page-table-entries on T-Head SoCs.
+ This will apply the memory attribute extension errata to handle the
+ non-standard PTE utilization on T-Head SoCs (XTheadMae).
If you don't know what to do here, say "Y".
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index b1c410bbc1aec..bf6a0a6318ee3 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -19,20 +19,26 @@
#include <asm/patch.h>
#include <asm/vendorid_list.h>
-static bool errata_probe_pbmt(unsigned int stage,
- unsigned long arch_id, unsigned long impid)
+#define CSR_TH_SXSTATUS 0x5c0
+#define SXSTATUS_MAEE _AC(0x200000, UL)
+
+static bool errata_probe_mae(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
{
- if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PBMT))
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_MAE))
return false;
if (arch_id != 0 || impid != 0)
return false;
- if (stage == RISCV_ALTERNATIVES_EARLY_BOOT ||
- stage == RISCV_ALTERNATIVES_MODULE)
- return true;
+ if (stage != RISCV_ALTERNATIVES_EARLY_BOOT &&
+ stage != RISCV_ALTERNATIVES_MODULE)
+ return false;
+
+ if (!(csr_read(CSR_TH_SXSTATUS) & SXSTATUS_MAEE))
+ return false;
- return false;
+ return true;
}
/*
@@ -140,8 +146,8 @@ static u32 thead_errata_probe(unsigned int stage,
{
u32 cpu_req_errata = 0;
- if (errata_probe_pbmt(stage, archid, impid))
- cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
+ if (errata_probe_mae(stage, archid, impid))
+ cpu_req_errata |= BIT(ERRATA_THEAD_MAE);
errata_probe_cmo(stage, archid, impid);
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 1f2dbfb8a8bfc..efd851e1b4832 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -23,7 +23,7 @@
#endif
#ifdef CONFIG_ERRATA_THEAD
-#define ERRATA_THEAD_PBMT 0
+#define ERRATA_THEAD_MAE 0
#define ERRATA_THEAD_PMU 1
#define ERRATA_THEAD_NUMBER 2
#endif
@@ -53,20 +53,20 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
* in the default case.
*/
#define ALT_SVPBMT_SHIFT 61
-#define ALT_THEAD_PBMT_SHIFT 59
+#define ALT_THEAD_MAE_SHIFT 59
#define ALT_SVPBMT(_val, prot) \
asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
"li %0, %1\t\nslli %0,%0,%3", 0, \
RISCV_ISA_EXT_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
"li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
- ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \
: "=r"(_val) \
: "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
- "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(prot##_THEAD >> ALT_THEAD_MAE_SHIFT), \
"I"(ALT_SVPBMT_SHIFT), \
- "I"(ALT_THEAD_PBMT_SHIFT))
+ "I"(ALT_THEAD_MAE_SHIFT))
-#ifdef CONFIG_ERRATA_THEAD_PBMT
+#ifdef CONFIG_ERRATA_THEAD_MAE
/*
* IO/NOCACHE memory types are handled together with svpbmt,
* so on T-Head chips, check if no other memory type is set,
@@ -83,11 +83,11 @@ asm volatile(ALTERNATIVE( \
"slli t3, t3, %3\n\t" \
"or %0, %0, t3\n\t" \
"2:", THEAD_VENDOR_ID, \
- ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ ERRATA_THEAD_MAE, CONFIG_ERRATA_THEAD_MAE) \
: "+r"(_val) \
- : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
- "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
- "I"(ALT_THEAD_PBMT_SHIFT) \
+ : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_MAE_SHIFT), \
+ "I"(_PAGE_PMA_THEAD >> ALT_THEAD_MAE_SHIFT), \
+ "I"(ALT_THEAD_MAE_SHIFT) \
: "t3")
#else
#define ALT_THEAD_PMA(_val)
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 2947423b5082e..115ac98b8d729 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -89,7 +89,7 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
* We override this value as its generic definition uses __pa too early in
* the boot process (before kernel_map.va_pa_offset is set).
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9f8ea0e33eb10..6afd6bb4882eb 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -896,7 +896,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
-#define TASK_SIZE 0xffffffffUL
+#define TASK_SIZE _AC(-1, UL)
#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 9f2a8e3ff2048..2902f68dc913a 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -54,7 +54,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
-#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index fe8e159394d8e..9687618432031 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -231,7 +231,7 @@ static void __init setup_bootmem(void)
* In 64-bit, any use of __va/__pa before this point is wrong as we
* did not know the start of DRAM before.
*/
- if (IS_ENABLED(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 15e482f2c6572..e713704be8372 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -730,6 +730,9 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
if (ret)
return ret;
+ /* store prog start time */
+ emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+
/* if (__bpf_prog_enter(prog) == 0)
* goto skip_exec_of_prog;
*/
@@ -737,9 +740,6 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
/* nop reserved for conditional jump */
emit(rv_nop(), ctx);
- /* store prog start time */
- emit_mv(RV_REG_S1, RV_REG_A0, ctx);
-
/* arg1: &args_off */
emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
if (!p->jited)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4474bf32d0a49..928820e61cb50 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -62,6 +62,7 @@ config X86
select ACPI_HOTPLUG_CPU if ACPI_PROCESSOR && HOTPLUG_CPU
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_INIT
+ select ARCH_CONFIGURES_CPU_MITIGATIONS
select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
@@ -2488,17 +2489,21 @@ config PREFIX_SYMBOLS
def_bool y
depends on CALL_PADDING && !CFI_CLANG
-menuconfig SPECULATION_MITIGATIONS
- bool "Mitigations for speculative execution vulnerabilities"
+menuconfig CPU_MITIGATIONS
+ bool "Mitigations for CPU vulnerabilities"
default y
help
- Say Y here to enable options which enable mitigations for
- speculative execution hardware vulnerabilities.
+ Say Y here to enable options which enable mitigations for hardware
+ vulnerabilities (usually related to speculative execution).
+ Mitigations can be disabled or restricted to SMT systems at runtime
+ via the "mitigations" kernel parameter.
- If you say N, all mitigations will be disabled. You really
- should know what you are doing to say so.
+ If you say N, all mitigations will be disabled. This CANNOT be
+ overridden at runtime.
-if SPECULATION_MITIGATIONS
+ Say 'Y', unless you really know what you are doing.
+
+if CPU_MITIGATIONS
config MITIGATION_PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index c086699b0d0c5..aa6c8f8ca9588 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -25,6 +25,7 @@ u64 cc_mkdec(u64 val);
void cc_random_init(void);
#else
#define cc_vendor (CC_VENDOR_NONE)
+static const u64 cc_mask = 0;
static inline u64 cc_mkenc(u64 val)
{
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0b748ee16b3d9..9abb8cc4cd474 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -148,7 +148,7 @@
#define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | \
_PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \
- _PAGE_DEVMAP | _PAGE_ENC | _PAGE_UFFD_WP)
+ _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
#define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
@@ -173,6 +173,7 @@ enum page_cache_mode {
};
#endif
+#define _PAGE_CC (_AT(pteval_t, cc_mask))
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cb9eece55904d..307302af0aeee 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -459,8 +459,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
case 0x1a:
switch (c->x86_model) {
- case 0x00 ... 0x0f:
- case 0x20 ... 0x2f:
+ case 0x00 ... 0x2f:
case 0x40 ... 0x4f:
case 0x70 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7062b84dd467d..6d3d20e3e43a9 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -139,7 +139,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
log_lvl, d3, d6, d7);
}
- if (cpu_feature_enabled(X86_FEATURE_OSPKE))
+ if (cr4 & X86_CR4_PKE)
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 8b04958da5e7d..b4f8fa0f722cd 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1203,12 +1203,14 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
break;
case SVM_EXIT_MONITOR:
- if (opcode == 0x010f && modrm == 0xc8)
+ /* MONITOR and MONITORX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
return ES_OK;
break;
case SVM_EXIT_MWAIT:
- if (opcode == 0x010f && modrm == 0xc9)
+ /* MWAIT and MWAITX instructions generate the same error code */
+ if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
return ES_OK;
break;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ff217cc35ce92..5159c7a229229 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1867,36 +1867,41 @@ populate_extable:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
/* Conservatively check that src_reg + insn->off is a kernel address:
- * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
- * src_reg is used as scratch for src_reg += insn->off and restored
- * after emit_ldx if necessary
+ * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
+ * and
+ * src_reg + insn->off < VSYSCALL_ADDR
*/
- u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
+ u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
u8 *end_of_jmp;
- /* At end of these emitted checks, insn->off will have been added
- * to src_reg, so no need to do relative load with insn->off offset
- */
- insn_off = 0;
+ /* movabsq r10, VSYSCALL_ADDR */
+ emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
+ (u32)(long)VSYSCALL_ADDR);
- /* movabsq r11, limit */
- EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
- EMIT((u32)limit, 4);
- EMIT(limit >> 32, 4);
+ /* mov src_reg, r11 */
+ EMIT_mov(AUX_REG, src_reg);
if (insn->off) {
- /* add src_reg, insn->off */
- maybe_emit_1mod(&prog, src_reg, true);
- EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
+ /* add r11, insn->off */
+ maybe_emit_1mod(&prog, AUX_REG, true);
+ EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
}
- /* cmp src_reg, r11 */
- maybe_emit_mod(&prog, src_reg, AUX_REG, true);
- EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+ /* sub r11, r10 */
+ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+ EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
+
+ /* movabsq r10, limit */
+ emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
+ (u32)(long)limit);
+
+ /* cmp r10, r11 */
+ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+ EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
- /* if unsigned '>=', goto load */
- EMIT2(X86_JAE, 0);
+ /* if unsigned '>', goto load */
+ EMIT2(X86_JA, 0);
end_of_jmp = prog;
/* xor dst_reg, dst_reg */
@@ -1922,18 +1927,6 @@ populate_extable:
/* populate jmp_offset for JMP above */
start_of_ldx[-1] = prog - start_of_ldx;
- if (insn->off && src_reg != dst_reg) {
- /* sub src_reg, insn->off
- * Restore src_reg after "add src_reg, insn->off" in prev
- * if statement. But if src_reg == dst_reg, emit_ldx
- * above already clobbered src_reg, so no need to restore.
- * If add src_reg, insn->off was unnecessary, no need to
- * restore either.
- */
- maybe_emit_1mod(&prog, src_reg, true);
- EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
- }
-
if (!bpf_prog->aux->extable)
break;
@@ -3559,3 +3552,9 @@ bool bpf_jit_supports_ptr_xchg(void)
{
return true;
}
+
+/* x86-64 JIT emits its own code to filter user addresses so return 0 here */
+u64 bpf_arch_uaddress_limit(void)
+{
+ return 0;
+}
diff --git a/block/bdev.c b/block/bdev.c
index 4dc94145eb533..da2a167a4d08b 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -882,7 +882,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
goto abort_claiming;
ret = -EBUSY;
if (!bdev_may_open(bdev, mode))
- goto abort_claiming;
+ goto put_module;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 4bfbe55553f41..a40b6f3946efe 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -170,8 +170,8 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
- GENMASK(((reg)->bit_width), 0)))
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
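For reference, the corrected MASK_VAL() now extracts the field the conventional way: shift the raw value right by bit_offset, then keep bit_width bits. A small standalone sketch of the same extraction (GENMASK() is re-derived locally for the demo; the register layout is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_GENMASK(h, l) ((~0ULL >> (63 - (h))) & (~0ULL << (l)))

    struct demo_reg {
    	uint8_t bit_offset;
    	uint8_t bit_width;
    };

    static uint64_t demo_mask_val(const struct demo_reg *reg, uint64_t val)
    {
    	/* Shift the field down to bit 0, then mask off everything above it */
    	return (val >> reg->bit_offset) & DEMO_GENMASK(reg->bit_width - 1, 0);
    }

    int main(void)
    {
    	struct demo_reg reg = { .bit_offset = 8, .bit_width = 16 };

    	/* Prints 0xbeef: 0xDEADBEEF00 >> 8, kept to 16 bits */
    	printf("0x%llx\n", (unsigned long long)demo_mask_val(&reg, 0xDEADBEEF00ULL));
    	return 0;
    }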
@@ -1002,14 +1002,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
+ size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- &val_u32, width);
+ &val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@@ -1018,17 +1018,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
switch (size) {
case 8:
@@ -1044,8 +1049,13 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
return -EFAULT;
}
@@ -1063,12 +1073,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ size = GET_BIT_WIDTH(reg);
+
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
- (u32)val, width);
+ (u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@@ -1076,17 +1087,22 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
val = MASK_VAL(reg, val);
@@ -1105,8 +1121,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
ret_val = -EFAULT;
break;
}
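The size now handed to the SystemIO and SystemMemory accessors comes from GET_BIT_WIDTH(): an access_width of 0 means "fall back to bit_width", otherwise the width is 8 << (access_width - 1); only the PCC path overrides it back to bit_width. A standalone sketch of that decoding (the struct only mimics the two fields of interest; values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct demo_cpc_reg {
    	uint8_t bit_width;    /* register width in bits */
    	uint8_t access_width; /* 0 = unspecified, 1..4 = byte/word/dword/qword */
    };

    static unsigned int demo_get_bit_width(const struct demo_cpc_reg *reg)
    {
    	/* Prefer the declared access size; fall back to the raw bit width */
    	return reg->access_width ? (8u << (reg->access_width - 1)) : reg->bit_width;
    }

    int main(void)
    {
    	struct demo_cpc_reg a = { .bit_width = 32, .access_width = 0 };
    	struct demo_cpc_reg b = { .bit_width = 32, .access_width = 2 };

    	printf("%u %u\n", demo_get_bit_width(&a), demo_get_bit_width(&b)); /* 32 16 */
    	return 0;
    }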
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index cd84af23f7eac..dd0b40b9bbe8b 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -492,16 +492,14 @@ static int lps0_device_attach(struct acpi_device *adev,
unsigned int func_mask;
/*
- * Avoid evaluating the same _DSM function for two
- * different UUIDs and prioritize the MSFT one.
+ * Log a message if the _DSM function sets for two
+ * different UUIDs overlap.
*/
func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
- if (func_mask) {
+ if (func_mask)
acpi_handle_info(adev->handle,
"Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
func_mask);
- lps0_dsm_func_mask &= ~func_mask;
- }
}
}
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index f0f54aeccc872..65185c9fa0013 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -946,25 +946,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
- struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
- .payload_in = &log_type,
- .size_in = sizeof(log_type),
- .payload_out = payload,
- .min_out = struct_size(payload, records, 0),
- };
-
do {
int rc, i;
-
- mbox_cmd.size_out = mds->payload_size;
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@@ -1297,7 +1294,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
- struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@@ -1309,16 +1305,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_POISON,
- .size_in = sizeof(pi),
- .payload_in = &pi,
- .size_out = mds->payload_size,
- .payload_out = po,
- .min_out = struct_size(po, record, 0),
- };
-
do {
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = mds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d7..1398814d8fbb6 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
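On a shared interrupt line, a read from a powered-down controller typically returns all ones, so the handler above bails out with IRQ_NONE before doing any further work. A simplified standalone sketch of that guard (register access is mocked; the real driver reads the controller's status register):

    #include <stdint.h>
    #include <stdio.h>

    enum demo_irqreturn { DEMO_IRQ_NONE, DEMO_IRQ_HANDLED };

    static enum demo_irqreturn demo_isr(uint32_t status)
    {
    	/* A powered-off device floats the bus, so reads come back as all
    	 * ones: the interrupt cannot be ours, let the other sharer handle it.
    	 */
    	if (status == 0xffffffffu)
    		return DEMO_IRQ_NONE;

    	/* ... normal interrupt handling would go here ... */
    	return DEMO_IRQ_HANDLED;
    }

    int main(void)
    {
    	printf("%d\n", demo_isr(0xffffffffu)); /* 0: not ours */
    	printf("%d\n", demo_isr(0x00000001u)); /* 1: handled  */
    	return 0;
    }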
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc3..c095a2c8f6595 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f30..ad4245cb301d5 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f83..c41ef195eeb9f 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340f..7b98944135eb4 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -293,7 +293,7 @@ struct idxd_driver_data {
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb609..264c4e47d7cca 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -354,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9f..8dc029c865515 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d6042629..5e94247e1ea70 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d396..e001f4f7aa640 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
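The owl-dma fix above is the classic read-modify-write pattern: read the register, set or clear the mask in the local copy, and write back that updated copy rather than the bare mask. A minimal standalone sketch of the corrected helper (the register is mocked with a plain variable):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t demo_reg = 0x00000f00; /* mocked MMIO register */

    static void demo_update(uint32_t mask, bool set)
    {
    	uint32_t regval = demo_reg;     /* read */

    	if (set)
    		regval |= mask;         /* modify */
    	else
    		regval &= ~mask;

    	demo_reg = regval;              /* write back the full value, not the mask */
    }

    int main(void)
    {
    	demo_update(0x1, true);
    	printf("0x%08x\n", demo_reg);   /* 0x00000f01: previous bits preserved */
    	return 0;
    }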
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5f6d7f1e095f9..ad8e3da1b2cd2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
- if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
- UNTIL(thrd, PL330_STATE_WFP);
-
return true;
}
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 88547a23825b1..3642508e88bb2 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
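With the early return above, a fully transferred descriptor now reports a residual of 0; without it, the modulo in the final expression would have turned "complete" into "bytes_req bytes remaining". A standalone sketch of the corrected computation (names shortened, values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t demo_residual(uint64_t bytes_req, uint64_t bytes_xfer)
    {
    	/* A finished transfer must report 0, otherwise the modulo below
    	 * would wrap it back up to bytes_req.
    	 */
    	if (bytes_req == bytes_xfer)
    		return 0;

    	return bytes_req - (bytes_xfer % bytes_req);
    }

    int main(void)
    {
    	printf("%llu\n", (unsigned long long)demo_residual(4096, 4096)); /* 0    */
    	printf("%llu\n", (unsigned long long)demo_residual(4096, 1024)); /* 3072 */
    	return 0;
    }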
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 98f5f6fb9ff9c..6ad08878e9386 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 170017ff2aad6..313b217388fe9 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
return 0;
}
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
- u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
-
- /* Clear the channel status register */
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
- if (ret)
- return ret;
-
- return 0;
+ return ret;
}
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
vchan_synchronize(&xdma_chan->vchan);
}
/**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
desc_num = 0;
for (i = 0; i < periods; i++) {
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
- addr += i * period_size;
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
+
spin_lock(&xchan->vchan.lock);
/* get submitted request */
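The stop_requested flag plus the last_interrupt completion form a small quiesce handshake: terminate_all() raises the flag, the ISR then completes the completion, and synchronize() waits (with a timeout) only while the engine still reports busy. A standalone pthread sketch of the same handshake (a condition variable stands in for the kernel completion; the hardware is simulated with a sleeping thread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t last_irq = PTHREAD_COND_INITIALIZER;
    static bool stop_requested;
    static bool irq_seen;

    /* Stands in for the interrupt handler: signal only when a stop is pending */
    static void *demo_isr(void *arg)
    {
    	(void)arg;
    	usleep(10000); /* pretend the hardware finishes a little later */
    	pthread_mutex_lock(&lock);
    	if (stop_requested) {
    		irq_seen = true;
    		pthread_cond_signal(&last_irq);
    	}
    	pthread_mutex_unlock(&lock);
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_mutex_lock(&lock);
    	stop_requested = true;          /* terminate_all() */
    	pthread_mutex_unlock(&lock);

    	pthread_create(&t, NULL, demo_isr, NULL);

    	pthread_mutex_lock(&lock);      /* synchronize(): wait for the last IRQ */
    	while (!irq_seen)
    		pthread_cond_wait(&last_irq, &lock);
    	pthread_mutex_unlock(&lock);

    	pthread_join(t, NULL);
    	printf("engine quiesced\n");
    	return 0;
    }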
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24e..eb0637d90342a 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
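The dpdma changes establish one locking order everywhere: chan->lock is taken first and chan->vchan.lock is nested inside it, as documented in the updated @lock comment. A standalone sketch of the pattern (two pthread mutexes; the work done under the locks is a placeholder):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;  /* outer */
    static pthread_mutex_t vchan_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

    /* Every path that needs both locks takes them in the same order,
     * which is what rules out an ABBA deadlock between them.
     */
    static void demo_issue_pending(void)
    {
    	pthread_mutex_lock(&chan_lock);
    	pthread_mutex_lock(&vchan_lock);

    	puts("queue next transfer");    /* placeholder for the real work */

    	pthread_mutex_unlock(&vchan_lock);
    	pthread_mutex_unlock(&chan_lock);
    }

    int main(void)
    {
    	demo_issue_pending();
    	return 0;
    }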
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 32188f098ef34..bc550ad0dbe0c 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -221,6 +221,19 @@ struct qsee_rsp_uefi_query_variable_info {
* alignment of 8 bytes (64 bits) for GUIDs. Our definition of efi_guid_t,
* however, has an alignment of 4 byte (32 bits). So far, this seems to work
* fine here. See also the comment on the typedef of efi_guid_t.
+ *
+ * Note: It looks like uefisecapp is quite picky about how the memory passed to
+ * it is structured and aligned. In particular the request/response setup used
+ * for QSEE_CMD_UEFI_GET_VARIABLE. While qcom_qseecom_app_send(), in theory,
+ * accepts separate buffers/addresses for the request and response parts, in
+ * practice, however, it seems to expect them to be both part of a larger
+ * contiguous block. We initially allocated separate buffers for the request
+ * and response but this caused the QSEE_CMD_UEFI_GET_VARIABLE command to
+ * either not write any response to the response buffer or outright crash the
+ * device. Therefore, we now allocate a single contiguous block of DMA memory
+ * for both and properly align the data using the macros below. In particular,
+ * request and response structs are aligned at 8 bytes (via __reqdata_offs()),
+ * following the driver that this has been reverse-engineered from.
*/
#define qcuefi_buf_align_fields(fields...) \
({ \
@@ -244,6 +257,12 @@ struct qsee_rsp_uefi_query_variable_info {
#define __array_offs(type, count, offset) \
__field_impl(sizeof(type) * (count), __alignof__(type), offset)
+#define __array_offs_aligned(type, count, align, offset) \
+ __field_impl(sizeof(type) * (count), align, offset)
+
+#define __reqdata_offs(size, offset) \
+ __array_offs_aligned(u8, size, 8, offset)
+
#define __array(type, count) __array_offs(type, count, NULL)
#define __field_offs(type, offset) __array_offs(type, 1, offset)
#define __field(type) __array_offs(type, 1, NULL)
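The new __reqdata_offs() helper lays out request and response structs inside one DMA buffer at 8-byte aligned offsets, as explained in the note above. A standalone sketch of packing two blobs into a single buffer this way (ALIGN() is re-derived locally; the sizes are made up):

    #include <stddef.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two) */
    #define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
    	size_t req_size = 52, rsp_size = 300;  /* made-up payload sizes */
    	size_t req_offs, rsp_offs, total;

    	req_offs = 0;
    	rsp_offs = DEMO_ALIGN(req_offs + req_size, 8);
    	total    = DEMO_ALIGN(rsp_offs + rsp_size, 8);

    	/* One allocation, two 8-byte aligned regions inside it */
    	printf("req at %zu, rsp at %zu, alloc %zu bytes\n",
    	       req_offs, rsp_offs, total);
    	return 0;
    }

In the driver the single allocation then replaces the two kzalloc() calls, and the device is handed cmd_buf_dma + req_offs / cmd_buf_dma + rsp_offs.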
@@ -277,10 +296,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
unsigned long buffer_size = *data_size;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -304,17 +328,19 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__array(u8, buffer_size)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_VARIABLE;
req_data->data_size = buffer_size;
@@ -332,7 +358,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -407,9 +435,7 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -422,10 +448,15 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
struct qsee_rsp_uefi_set_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
size_t req_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -450,17 +481,19 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__array_offs(u8, data_size, &data_offs)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_SET_VARIABLE;
req_data->attributes = attributes;
@@ -483,8 +516,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
if (data_size)
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -507,9 +541,7 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -521,10 +553,15 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name_size || !name || !guid)
@@ -545,17 +582,19 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__array(*name, *name_size / sizeof(*name))
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_NEXT_VARIABLE;
req_data->guid_offset = guid_offs;
@@ -572,7 +611,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
goto out_free;
}
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -645,9 +686,7 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -659,26 +698,34 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
+ size_t req_offs;
+ size_t rsp_offs;
int status;
- req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(sizeof(*req_data), &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_QUERY_VARIABLE_INFO;
req_data->attributes = attr;
req_data->length = sizeof(*req_data);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, sizeof(*req_data), rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, sizeof(*req_data),
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -711,9 +758,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
*max_variable_size = rsp_data->max_variable_size;
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 520de9b5633ab..90283f160a228 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1576,9 +1576,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and read back
@@ -1589,33 +1589,13 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
- dma_addr_t req_phys;
- dma_addr_t rsp_phys;
int status;
- /* Map request buffer */
- req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, req_phys);
- if (status) {
- dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
- return status;
- }
-
- /* Map response buffer */
- rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
- status = dma_mapping_error(__scm->dev, rsp_phys);
- if (status) {
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
- dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
- return status;
- }
-
- /* Set up SCM call data */
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1623,18 +1603,13 @@ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req_phys;
+ desc.args[1] = req;
desc.args[2] = req_size;
- desc.args[3] = rsp_phys;
+ desc.args[3] = rsp;
desc.args[4] = rsp_size;
- /* Perform call */
status = qcom_scm_qseecom_call(&desc, &res);
- /* Unmap buffers */
- dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
-
if (status)
return status;
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index b75e0b12087ac..4b29abafecf6a 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -195,7 +195,8 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void tng_irq_ack(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
void __iomem *gisr;
u8 shift;
@@ -227,7 +228,8 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
static void tng_irq_mask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
tng_irq_unmask_mask(priv, gpio, false);
@@ -236,7 +238,8 @@ static void tng_irq_mask(struct irq_data *d)
static void tng_irq_unmask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
gpiochip_enable_irq(&priv->chip, gpio);
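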
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d87dd06db40d0..9130c691a2dd3 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -36,12 +36,6 @@
#define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
#define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
#define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
-#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN | \
- TEGRA186_GPIO_SCR_SEC_G1R | \
- TEGRA186_GPIO_SCR_SEC_G1W)
-#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN)
/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
@@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
value = __raw_readl(secure + TEGRA186_GPIO_SCR);
- if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
- return true;
+ /*
+	 * When SCR_SEC_[R|W]EN is unset, we have full read/write access to all the
+	 * registers for the given GPIO pin.
+	 * When SCR_SEC_[R|W]EN is set, the accompanying SCR_SEC_G1[R|W] bit must also
+	 * be checked to determine read/write access to all the registers for the
+	 * given GPIO pin.
+ */
- if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
+ if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
return true;
return false;
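The reworked condition grants access only when each direction is either unrestricted (the SCR_SEC_*EN bit clear) or explicitly allowed for guest 1 (the matching SCR_SEC_G1* bit set). A standalone sketch of the same predicate with the four bits broken out (REN/G1W/G1R positions match the defines above; the WEN position is assumed here for the demo):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SCR_SEC_WEN  (1u << 28) /* assumed bit position for the demo */
    #define DEMO_SCR_SEC_REN  (1u << 27)
    #define DEMO_SCR_SEC_G1W  (1u << 9)
    #define DEMO_SCR_SEC_G1R  (1u << 1)

    static bool demo_gpio_is_accessible(uint32_t scr)
    {
    	bool can_read  = !(scr & DEMO_SCR_SEC_REN) || (scr & DEMO_SCR_SEC_G1R);
    	bool can_write = !(scr & DEMO_SCR_SEC_WEN) || (scr & DEMO_SCR_SEC_G1W);

    	return can_read && can_write;
    }

    int main(void)
    {
    	printf("%d\n", demo_gpio_is_accessible(0));                                   /* 1 */
    	printf("%d\n", demo_gpio_is_accessible(DEMO_SCR_SEC_REN));                    /* 0 */
    	printf("%d\n", demo_gpio_is_accessible(DEMO_SCR_SEC_REN | DEMO_SCR_SEC_G1R)); /* 1 */
    	return 0;
    }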
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df58a6a1a67ec..2131de36e3dac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1854,6 +1854,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
- /* Validate BOs and map them to GPUVM (update VM page tables). */
+ /* Validate BOs managed by KFD */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
- struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
goto validate_map_fail;
}
}
+ }
+
+ if (failed_size)
+ pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
+ goto validate_map_fail;
+ }
+
+ /* Update mappings managed by KFD. */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct kfd_mem_attachment *attachment;
+
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
@@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
}
- if (failed_size)
- pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
-
- /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
- * validations above would invalidate DMABuf imports again.
- */
- ret = process_validate_vms(process_info, &exec.ticket);
- if (ret) {
- pr_debug("Validating VMs failed, ret: %d\n", ret);
- goto validate_map_fail;
- }
-
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index a00cf4756ad0e..1569bef030eac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1132,6 +1132,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
return;
amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
amdgpu_ring_fini(ring);
kfree(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2099159a693fa..ce733e3cb35d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -605,6 +605,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
else
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 2;
+ else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
bo->tbo.priority = 1;
if (!bp->destroy)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index 0df97c3e3a700..f7c73533e336f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -774,6 +774,9 @@ static int umsch_mm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+ return 0;
+
return umsch_mm_test(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 6695481f870f8..c23d97d34b7ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -205,7 +205,7 @@ disable_dpm:
dpm_ctl &= 0xfffffffe; /* Disable DPM */
WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
- return 0;
+ return -EINVAL;
}
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f90905ef32c76..701146d649c35 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -9186,7 +9186,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -9276,7 +9276,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
7 + /* gfx_v10_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f7325b02a191f..f00e05aba46a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -6192,7 +6192,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
5 + /* COND_EXEC */
@@ -6278,7 +6278,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7 + /* gfx_v11_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6f97a6d0e6d05..99dbd2341120d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -6981,7 +6981,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7019,7 +7018,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 82eab49be82bb..e708468ac54dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -368,7 +368,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
+ << (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 42f4bd250def6..da01b524b9f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ if (ring->me > 1) {
+ amdgpu_asic_flush_hdp(adev, ring);
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index 769eb8f7bb3c5..09315dd5a1ec9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -144,6 +144,12 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
}
+ /* setup collaborate mode */
+ vpe_v6_1_set_collaborate_mode(vpe, true);
+ /* setup DPM */
+ if (amdgpu_vpe_configure_dpm(vpe))
+ dev_warn(adev->dev, "VPE failed to enable DPM\n");
+
/*
* For VPE 6.1.1, still only need to add master's offset, and psp will apply it to slave as well.
* Here use instance 0 as master.
@@ -159,11 +165,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
- amdgpu_vpe_psp_update_sram(adev);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
-
- return 0;
+ return amdgpu_vpe_psp_update_sram(adev);
}
vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
@@ -196,8 +198,6 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index bdc01ca9609a7..5c8d81bfce7ab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -509,10 +509,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
+ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+ if (r) {
+ dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+ return -ENOSPC;
+ }
+
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
- return r;
+ goto out;
}
ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
@@ -545,6 +554,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
svm_range_vram_node_free(prange);
}
+out:
+ amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}
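The migration path now follows a reserve, try, unreserve-on-exit shape: the VRAM budget is reserved up front and the single out: label releases it on both the success and failure paths. A standalone sketch of that goto-based cleanup idiom (the reserve/alloc helpers are stubs for the demo):

    #include <stdio.h>

    static long budget = 1 << 20; /* fake byte budget */

    static int demo_reserve(long bytes)
    {
    	if (bytes > budget)
    		return -1;
    	budget -= bytes;
    	return 0;
    }

    static void demo_unreserve(long bytes) { budget += bytes; }
    static int demo_alloc(long bytes) { (void)bytes; return 0; } /* pretend it works */

    static int demo_migrate(long bytes)
    {
    	int r;

    	r = demo_reserve(bytes);
    	if (r)
    		return r;             /* nothing reserved, nothing to undo */

    	r = demo_alloc(bytes);
    	if (r)
    		goto out;

    	/* ... copy pages here ... */

    out:
    	demo_unreserve(bytes);        /* released on success and failure alike */
    	return r;
    }

    int main(void)
    {
    	printf("%d\n", demo_migrate(4096));
    	return 0;
    }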
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b79986412cd83..58c1fe5421934 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p)
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
+ if (!ef)
+ return -EINVAL;
ret = dma_fence_signal(ef);
dma_fence_put(ef);
@@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work)
* they are responsible stopping the queues and scheduling
* the restore work.
*/
- if (!signal_eviction_fence(p))
- queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- else
+ if (signal_eviction_fence(p) ||
+ mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
@@ -2011,9 +2012,9 @@ static void restore_process_worker(struct work_struct *work)
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
- ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
- WARN(!ret, "reschedule restore work failed\n");
+ if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f0f7f48af4137..386875e6eb96b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3426,7 +3426,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
- return r;
+ return 0;
}
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6d2f60c61decc..f3f94d109726d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3029,6 +3029,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
+ dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e8..bbd0169010c2d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -4261,6 +4261,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
+ /*
+ * If gpu_od is the only member in the list, that means gpu_od is an
+ * empty directory, so remove it.
+ */
+ if (list_is_singular(&adev->pm.od_kobj_list))
+ goto err_out;
+
return 0;
err_out:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 3957af057d54f..c977ebe88001d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -2294,6 +2294,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return sizeof(*gpu_metrics);
}
+static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(adev->pdev, i * 4,
+ adev->pdev->saved_config_space[i]);
+ pci_restore_msi_state(adev->pdev);
+}
+
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -2315,6 +2326,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
+ /* Certain platforms have switches which assign virtual BAR values to
+ * devices. OS uses the virtual BAR values and device behind the switch
+	 * is assigned another BAR value. When the device's config space registers
+ * are queried, switch returns the virtual BAR values. When mode-2 reset
+ * is performed, switch is unaware of it, and will continue to return
+	 * the same virtual values to the OS. This affects the
+ * pci_restore_config_space() API as it doesn't write the value saved if
+ * the current value read from config space is the same as what is
+ * saved. As a workaround, make sure the config space is restored
+ * always.
+ */
+ if (!(adev->flags & AMD_IS_APU))
+ smu_v13_0_6_restore_pci_config(smu);
+
dev_dbg(smu->adev->dev, "wait for reset ack\n");
do {
ret = smu_cmn_wait_for_response(smu);
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e440f458b6633..93337543aac32 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -224,8 +224,8 @@ __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
- drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
- &new_shadow_plane_state->fmtcnv_state);
+ drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+ &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 734412aae94dd..a9bf426f69b36 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -164,26 +164,6 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.eco_id;
break;
- case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
- *value = gpu->identity.nn_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
- *value = gpu->identity.nn_mad_per_core;
- break;
-
- case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
- *value = gpu->identity.tp_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
- *value = gpu->identity.on_chip_sram_size;
- break;
-
- case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
- *value = gpu->identity.axi_sram_size;
- break;
-
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -663,8 +643,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
/* Disable TX clock gating on affected core revisions. */
if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6203))
+ etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
+ etnaviv_is_model_rev(gpu, GC7000, 0x6203))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
/* Disable SE and RA clock gating on affected core revisions. */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 7d5e9158e13c1..197e0037732ec 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -54,18 +54,6 @@ struct etnaviv_chip_identity {
/* Number of Neural Network cores. */
u32 nn_core_count;
- /* Number of MAD units per Neural Network core. */
- u32 nn_mad_per_core;
-
- /* Number of Tensor Processing cores. */
- u32 tp_core_count;
-
- /* Size in bytes of the SRAM inside the NPU. */
- u32 on_chip_sram_size;
-
- /* Size in bytes of the SRAM across the AXI bus. */
- u32 axi_sram_size;
-
/* Size of the vertex cache. */
u32 vertex_cache_size;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index d8e7334de8cea..8665f2658d51b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -17,10 +17,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 128,
.shader_core_count = 1,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -52,11 +48,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 256,
.shader_core_count = 1,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 512,
.pixel_pipes = 1,
@@ -89,10 +80,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -125,10 +112,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -160,11 +143,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -197,10 +175,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 1024,
.shader_core_count = 4,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -233,10 +207,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 8,
- .nn_mad_per_core = 64,
- .tp_core_count = 4,
- .on_chip_sram_size = 524288,
- .axi_sram_size = 1048576,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -269,10 +239,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 6,
- .nn_mad_per_core = 64,
- .tp_core_count = 3,
- .on_chip_sram_size = 262144,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 4f302cd5e1a6c..58fed80c7392a 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
- psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index dcfcd7b89d4a1..6dece8f0e380f 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
- /* This must occur after the backlight is properly initialised */
- psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index c5edfa4aa4ccd..83c17689c454f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -162,7 +162,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
-#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@@ -491,11 +490,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
- /* LID-Switch */
- spinlock_t lid_lock;
- struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
-/* psb_lid.c */
-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
-
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
deleted file mode 100644
index 58a7fe3926360..0000000000000
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- **************************************************************************/
-
-#include <linux/spinlock.h>
-
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_reg.h"
-
-static void psb_lid_timer_func(struct timer_list *t)
-{
- struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
- u32 __iomem *lid_state = dev_priv->opregion.lid_state;
- u32 pp_status;
-
- if (readl(lid_state) == dev_priv->lid_last_state)
- goto lid_timer_schedule;
-
- if ((readl(lid_state)) & 0x01) {
- /*lid state is open*/
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0 &&
- (pp_status & PP_SEQUENCE_MASK) != 0);
-
- if (REG_READ(PP_STATUS) & PP_ON) {
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
- } else {
- DRM_DEBUG("LVDS panel never powered up");
- return;
- }
- } else {
- psb_intel_lvds_set_brightness(dev, 0);
-
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
- }
- dev_priv->lid_last_state = readl(lid_state);
-
-lid_timer_schedule:
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
- if (!timer_pending(lid_timer)) {
- lid_timer->expires = jiffies + PSB_LID_DELAY;
- add_timer(lid_timer);
- }
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
-{
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
-
- spin_lock_init(&dev_priv->lid_lock);
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-
- timer_setup(lid_timer, psb_lid_timer_func, 0);
-
- lid_timer->expires = jiffies + PSB_LID_DELAY;
-
- add_timer(lid_timer);
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
-{
- del_timer_sync(&dev_priv->lid_timer);
-}
-
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index a0afe1ba6dd5c..f9705430ada93 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -378,7 +378,9 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err);
/* Initialize CCS mode sysfs after early initialization of HW engines */
- xe_gt_ccs_mode_sysfs_init(gt);
+ err = xe_gt_ccs_mode_sysfs_init(gt);
+ if (err)
+ goto err_force_wake;
/*
* Stash hardware-reported version. Since this register does not exist
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index 529fc286cd06c..396aeb5b99242 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -167,25 +167,20 @@ static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
* and it is expected that there are no open drm clients while doing so.
* The number of available compute slices is exposed to user through a per-gt
* 'num_cslices' sysfs interface.
+ *
+ * Returns: Returns error value for failure and 0 for success.
*/
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (!xe_gt_ccs_mode_enabled(gt))
- return;
+ return 0;
err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
- if (err) {
- drm_warn(&xe->drm, "Sysfs creation for ccs_mode failed err: %d\n", err);
- return;
- }
+ if (err)
+ return err;
- err = drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
- if (err) {
- sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- }
+ return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
}
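For context: the xe_gt_ccs_mode change above (together with the prototype update in xe_gt_ccs_mode.h below) leans on the drm-managed action API, where drmm_add_action_or_reset() runs the release callback immediately when registration fails, so no manual sysfs_remove_files() is needed on the error path. A rough sketch of the resulting shape, with names taken from the hunk and the bodies condensed:

    static void ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
    {
            struct xe_gt *gt = arg;

            sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
    }

    static int ccs_mode_sysfs_init(struct xe_gt *gt)
    {
            struct xe_device *xe = gt_to_xe(gt);
            int err;

            err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
            if (err)
                    return err;

            /* on failure this runs ccs_mode_sysfs_fini() before returning */
            return drmm_add_action_or_reset(&xe->drm, ccs_mode_sysfs_fini, gt);
    }

The xe_huc hunk further down applies the same simplification to free_gsc_pkt().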
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
index f39975aaaab0d..f8779852cf0d2 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
@@ -12,7 +12,7 @@
#include "xe_platform_types.h"
void xe_gt_apply_ccs_mode(struct xe_gt *gt);
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 355edd4d758af..7f32547f94b26 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1054,10 +1054,10 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
- ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
break;
case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
- ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
break;
default:
drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index b545f850087cd..6b9b1cbedd379 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -53,7 +53,6 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_gt *gt = huc_to_gt(huc);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
- int err;
/* we use a single object for both input and output */
bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
@@ -66,13 +65,7 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
huc->gsc_pkt = bo;
- err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
- if (err) {
- free_gsc_pkt(&xe->drm, huc);
- return err;
- }
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}
int xe_huc_init(struct xe_huc *huc)
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ff5c486a1dbb1..db0d1ac82910e 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2200,13 +2200,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
+ if (!adap->algo->master_xfer) {
+ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@@ -2273,11 +2278,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
- if (!adap->algo->master_xfer) {
- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
- return -EOPNOTSUPP;
- }
-
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
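With the adapter capability check moved into __i2c_transfer(), both the locked and unlocked entry points now reject adapters without a master_xfer hook in one place. A typical caller simply propagates the negative return; the helper below is hypothetical, only the i2c_msg/i2c_transfer() usage is the point:

    static int foo_read_reg(struct i2c_client *client, u8 reg, u8 *val)
    {
            struct i2c_msg msgs[] = {
                    { .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
                    { .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
            };
            int ret;

            ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
            if (ret < 0)
                    return ret;     /* includes -EOPNOTSUPP from the new check */

            return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
    }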
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2a537cbfcb077..5f7d3db3afd82 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4567,13 +4567,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 01d2743444ec6..3a989efae1420 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -137,7 +137,7 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
break;
default:
break;
- };
+ }
}
/* finalization */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 56aa2a8b9d715..7d0746b37c8ec 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -765,7 +765,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
return td;
out_blkdev_put:
- fput(bdev_file);
+ __fput_sync(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -778,7 +778,13 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- fput(td->dm_dev.bdev_file);
+
+ /* Leverage async fput() if DMF_DEFERRED_REMOVE set */
+ if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ fput(td->dm_dev.bdev_file);
+ else
+ __fput_sync(td->dm_dev.bdev_file);
+
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 572333ead5fb8..4bd4f32bcdabb 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -758,15 +758,6 @@ static int at24_probe(struct i2c_client *client)
}
pm_runtime_enable(dev);
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem)) {
- pm_runtime_disable(dev);
- if (!pm_runtime_status_suspended(dev))
- regulator_disable(at24->vcc_reg);
- return dev_err_probe(dev, PTR_ERR(at24->nvmem),
- "failed to register nvmem\n");
- }
-
/*
* Perform a one-byte test read to verify that the chip is functional,
* unless powering on the device is to be avoided during probe (i.e.
@@ -782,6 +773,15 @@ static int at24_probe(struct i2c_client *client)
}
}
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
+ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+ "failed to register nvmem\n");
+ }
+
/* If this a SPD EEPROM, probe for DDR3 thermal sensor */
if (cdata == &at24_data_spd)
at24_probe_temp_sensor(client);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b88d6dec209f5..9a5f75163acae 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -300,6 +300,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
remain = sgm->length;
if (remain > host->data_len)
remain = host->data_len;
+ sgm->consumed = 0;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 668e0aceeebac..e113b99a3eab5 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 1d8f5a76096ae..f2e4a93ed1d61 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -626,6 +626,7 @@ static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
/* perform tuning */
sdhci_start_tuning(host);
+ host->tuning_loop_count = 128;
host->tuning_err = __sdhci_execute_tuning(host, opcode);
if (host->tuning_err) {
/* disable auto-tuning upon tuning error */
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5887feb347a4e..0de87bc638405 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -900,7 +900,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
- config.add_legacy_fixed_of_cells = true;
+ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index a8d12c71f987b..1b2ec0fec60c7 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -857,7 +857,7 @@ static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
struct brcmnand_soc *soc = ctrl->soc;
int i;
- if (soc->read_data_bus) {
+ if (soc && soc->read_data_bus) {
soc->read_data_bus(soc, flash_cache, buffer, fc_words);
} else {
for (i = 0; i < fc_words; i++)
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5243fab9face0..8db7fc4245711 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1554,7 +1554,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index b079605c84d38..b8cff9240b286 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
- } else {
+ } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
return 0;
}
@@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
- 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
- NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == OP_BLOCK_ERASE)
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f29ef72a2f1da..0fae2b96b21fe 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -5758,7 +5758,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
@@ -6217,7 +6217,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b1f84b37032a7..c7e7dac057a33 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
/* UniMAC stops on a packet boundary, wait for a full-size packet
* to be processed
@@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -3334,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3595,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
* 3. The number of filters needed exceeds the number filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -4003,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7523b60b3c1c0..43b923c48b14f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 7a41cad5788f4..1248792d7fd4d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Can't suspend with WoL if MAC is still in reset */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
reg &= ~CMD_SW_RESET;
@@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* disable RX */
reg &= ~CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
mdelay(10);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
@@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
@@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
@@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ada893557475..c4a3698cef66f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#include <linux/acpi.h>
@@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
@@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* block for the interface to work, unconditionally clear the
* Out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
@@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
reg |= RGMII_MODE_EN;
}
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
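The GENET changes above put every UMAC_CMD read-modify-write under the new reg_lock so that reset, Wake-on-LAN, rx_mode and link reconfiguration cannot lose each other's bit updates. The guarded sequence always has the same shape; condensed into a sketch (the helper name is hypothetical, the accessors are the driver's own):

    static void umac_cmd_rmw(struct bcmgenet_priv *priv, u32 clear, u32 set)
    {
            u32 reg;

            spin_lock_bh(&priv->reg_lock);
            reg = bcmgenet_umac_readl(priv, UMAC_CMD);
            reg &= ~clear;
            reg |= set;
            bcmgenet_umac_writel(priv, reg, UMAC_CMD);
            spin_unlock_bh(&priv->reg_lock);
    }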
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7246e13dd559f..97291bfbeea58 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 49d5808b7d11d..de52bcb884c41 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2670,12 +2670,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2710,7 +2710,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 93544f1cc2a51..f7ae0e0aa4a4f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -157,7 +157,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success) {
*data = (u16)mdic;
@@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success)
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index d252d98218d08..9fc0fd95a13d8 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -171,7 +171,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 8)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -257,7 +257,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 4)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -332,7 +332,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 2)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -428,7 +428,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 5)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2500f5ba4f5a4..881d704644fbe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count + 1);
+ cmd_buf = memdup_user_nul(buffer, count);
if (IS_ERR(cmd_buf))
return -ENOMEM;
- cmd_buf[count] = '\0';
-
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
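The debugfs conversions above (bnad, ice, rvu) all switch to memdup_user_nul(), which copies the user buffer and appends a terminating NUL, so the result can go straight into string parsers without the manual cmd_buf[count] = '\0' that the rvu code used to need. A generic write handler in the same style (handler name and the kstrtoul() step are illustrative):

    static ssize_t foo_debugfs_write(struct file *filp, const char __user *buf,
                                     size_t count, loff_t *ppos)
    {
            unsigned long val;
            char *cmd;
            int ret;

            cmd = memdup_user_nul(buf, count);      /* NUL-terminated copy or ERR_PTR */
            if (IS_ERR(cmd))
                    return PTR_ERR(cmd);

            ret = kstrtoul(cmd, 0, &val);           /* safe: cmd is a C string */
            kfree(cmd);
            if (ret)
                    return ret;

            /* ... apply val ... */
            return count;
    }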
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a5ac21a0ee33f..cb6b33a228ea2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f)
{
struct qede_arfs_fltr_node *n;
- int min_hlen, rc = -EINVAL;
struct qede_arfs_tuple t;
+ int min_hlen, rc;
__qede_lock(edev);
@@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+ rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
+ if (rc)
goto unlock;
/* Validate profile mode and number of filters */
@@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
DP_NOTICE(edev,
"Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ rc = -EINVAL;
goto unlock;
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+ rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
+ if (rc)
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
- err = -EINVAL;
+ err = qede_parse_flow_attr(edev, proto, flow->rule, t);
+ if (err)
goto err_out;
- }
/* Make sure location is valid and filter isn't already set */
err = qede_flow_spec_validate(edev, &flow->rule->action, t,
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index a9a85d0f5a5d8..8884913e04738 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1675,6 +1675,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
bool raw_proto = false;
void *oiph;
__be32 vni = 0;
+ int nh;
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1765,12 +1766,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(vxlan->dev, rx_length_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
@@ -1840,7 +1857,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
}
parp = arp_hdr(skb);
@@ -1896,7 +1915,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2055,7 +2074,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2266,7 +2285,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
len);
} else {
drop:
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2298,7 +2317,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
addr_family, dst_port,
vxlan->cfg.flags);
if (!dst_vxlan) {
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
@@ -2563,7 +2582,7 @@ out_unlock:
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2571,11 +2590,11 @@ drop:
tx_error:
rcu_read_unlock();
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_release(ndst);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2608,7 +2627,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2646,7 +2665,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2743,7 +2762,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
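Two patterns recur in the vxlan hunk: header pointers are rebuilt from a saved offset because pskb_inet_may_pull(), like any pskb_may_pull() variant, can reallocate skb->head, and counters move to DEV_STATS_INC()/dev_core_stats_*_inc() so concurrent increments are not lost on the unsynchronized dev->stats fields. The offset rule in isolation, as a minimal illustrative sketch:

    static int example_pull(struct sk_buff *skb)
    {
            int nh = skb_network_header(skb) - skb->head;   /* offset survives realloc */
            const struct iphdr *iph;

            if (!pskb_may_pull(skb, sizeof(*iph)))          /* may move skb->head */
                    return -EINVAL;

            iph = (const struct iphdr *)(skb->head + nh);   /* rebuild the pointer */
            return iph->protocol;
    }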
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index b700f52b7b679..11fcb1867118c 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -110,8 +110,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ if (imx8_phy->drvdata->variant != IMX8MM) {
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ }
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 41162d7228c91..1d1db1737422a 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -603,7 +603,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
u16 val;
fix_idx = 0;
- for (addr = 0; addr < 512; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
/*
* All PHY register values are defined in full for 3.125Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
@@ -611,11 +611,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
* comparison to 3.125 Gbps values. These register values are
* stored in "gbe_phy_init_fix" array.
*/
- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
+ if (!is_1gbps &&
+ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
+ gbe_phy_init_fix[fix_idx].addr == addr) {
/* Use new value */
val = gbe_phy_init_fix[fix_idx].value;
- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
- fix_idx++;
+ fix_idx++;
} else {
val = gbe_phy_init[addr];
}
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 03fb0d4b75d74..20d4c020a83c1 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -297,7 +297,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(qphy->phy),
"failed to create phy\n");
- qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ qphy->vreg = devm_regulator_get(dev, "vdd");
if (IS_ERR(qphy->vreg))
return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7d585a4bbbba9..c21cdb8dbfe74 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -77,6 +77,7 @@ enum qphy_reg_layout {
QPHY_COM_BIAS_EN_CLKBUFLR_EN,
QPHY_DP_PHY_STATUS,
+ QPHY_DP_PHY_VCO_DIV,
QPHY_TX_TX_POL_INV,
QPHY_TX_TX_DRV_LVL,
@@ -102,6 +103,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V3_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
@@ -126,6 +128,7 @@ static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V4_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
@@ -150,6 +153,7 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V5_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
@@ -174,6 +178,7 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
@@ -2150,9 +2155,9 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
if (reverse)
- writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x4c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
else
- writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
return reverse;
}
@@ -2162,6 +2167,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 phy_vco_div;
unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
switch (dp_opts->link_rate) {
case 1620:
@@ -2184,7 +2190,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_VCO_DIV]);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
index f5cfacf9be964..181057421c113 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V5_H_
/* Only for QMP V5 PHY - DP PHY registers */
+#define QSERDES_V5_DP_PHY_VCO_DIV 0x070
#define QSERDES_V5_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V5_DP_PHY_STATUS 0x0dc
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
index 01a20d3be4b81..fa967a1af058f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V6_H_
/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_VCO_DIV 0x070
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index a34f67bb7e61a..b60a4b60451e5 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -87,6 +87,7 @@ config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select RATIONAL
help
Enable this to support the Rockchip HDMI/eDP Combo PHY
with Samsung IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 76b9cf417591d..bf74e429ff46e 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -125,12 +125,15 @@ struct rockchip_combphy_grfcfg {
};
struct rockchip_combphy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[3];
const struct rockchip_combphy_grfcfg *grfcfg;
int (*combphy_cfg)(struct rockchip_combphy_priv *priv);
};
struct rockchip_combphy_priv {
u8 type;
+ int id;
void __iomem *mmio;
int num_clks;
struct clk_bulk_data *clks;
@@ -320,7 +323,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
struct rockchip_combphy_priv *priv;
const struct rockchip_combphy_cfg *phy_cfg;
struct resource *res;
- int ret;
+ int ret, id;
phy_cfg = of_device_get_match_data(dev);
if (!phy_cfg) {
@@ -338,6 +341,15 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return ret;
}
+ /* find the phy-id from the io address */
+ priv->id = -ENODEV;
+ for (id = 0; id < phy_cfg->num_phys; id++) {
+ if (res->start == phy_cfg->phy_ids[id]) {
+ priv->id = id;
+ break;
+ }
+ }
+
priv->dev = dev;
priv->type = PHY_NONE;
priv->cfg = phy_cfg;
@@ -562,6 +574,12 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfe820000,
+ 0xfe830000,
+ 0xfe840000,
+ },
.grfcfg = &rk3568_combphy_grfcfgs,
.combphy_cfg = rk3568_combphy_cfg,
};
@@ -578,8 +596,14 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ switch (priv->id) {
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
+ break;
+ case 2:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ break;
+ }
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
@@ -736,6 +760,12 @@ static const struct rockchip_combphy_grfcfg rk3588_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfee00000,
+ 0xfee10000,
+ 0xfee20000,
+ },
.grfcfg = &rk3588_combphy_grfcfgs,
.combphy_cfg = rk3588_combphy_cfg,
};
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 121e5961ce114..9857ee45b89e0 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -40,6 +40,8 @@
#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
#define RK3588_LANE_AGGREGATION BIT(2)
+#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
+#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
struct rockchip_p3phy_ops;
@@ -132,7 +134,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
{
u32 reg = 0;
- u8 mode = 0;
+ u8 mode = RK3588_LANE_AGGREGATION; /* default */
int ret;
/* Deassert PCIe PMA output clamp mode */
@@ -140,31 +142,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
/* Set bifurcation if needed */
for (int i = 0; i < priv->num_lanes; i++) {
- if (!priv->lanes[i])
- mode |= (BIT(i) << 3);
-
if (priv->lanes[i] > 1)
- mode |= (BIT(i) >> 1);
- }
-
- if (!mode)
- reg = RK3588_LANE_AGGREGATION;
- else {
- if (mode & (BIT(0) | BIT(1)))
- reg |= RK3588_BIFURCATION_LANE_0_1;
-
- if (mode & (BIT(2) | BIT(3)))
- reg |= RK3588_BIFURCATION_LANE_2_3;
+ mode &= ~RK3588_LANE_AGGREGATION;
+ if (priv->lanes[i] == 3)
+ mode |= RK3588_BIFURCATION_LANE_0_1;
+ if (priv->lanes[i] == 4)
+ mode |= RK3588_BIFURCATION_LANE_2_3;
}
- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+ reg = mode;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
+ RK3588_PCIE30_PHY_MODE_EN | reg);
/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
if (!IS_ERR(priv->pipe_grf)) {
- reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
if (reg)
regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
- (reg << 16) | reg);
+ RK3588_PCIE1LN_SEL_EN | reg);
}
reset_control_deassert(priv->p30phy);
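The new RK3588_PCIE1LN_SEL_EN / RK3588_PCIE30_PHY_MODE_EN values follow the usual Rockchip GRF convention: the upper 16 bits of a write act as a per-bit write-enable mask, so a single regmap_write() updates only the named field and leaves the rest of the register alone, with no read-modify-write required. Roughly:

    /* bits [1:0] carry the value, bits [17:16] enable exactly those bits */
    #define RK3588_PCIE1LN_SEL_EN   (GENMASK(1, 0) << 16)

    regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
                 RK3588_PCIE1LN_SEL_EN | reg);   /* only bits [1:0] change */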
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 13cd614e12a1d..751fecd466e3e 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -69,7 +69,6 @@ struct tusb1210 {
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
- struct power_supply *charger;
#endif
};
@@ -236,19 +235,24 @@ static const char * const tusb1210_chargers[] = {
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
+ struct power_supply *charger = NULL;
union power_supply_propval val;
- int i;
+ bool online = false;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
+ charger = power_supply_get_by_name(tusb1210_chargers[i]);
- if (!tusb->charger)
+ if (!charger)
return false;
- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
- return false;
+ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0)
+ online = val.intval;
+
+ power_supply_put(charger);
- return val.intval;
+ return online;
}
static void tusb1210_chg_det_work(struct work_struct *work)
@@ -473,9 +477,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
-
- if (tusb->charger)
- power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index d376fa7114d1a..029efe16f8cc2 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -43,7 +43,7 @@
#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
-#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */
#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU690 0x690 /* Multi-function Pin Control #24 */
@@ -2495,38 +2495,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
/* GPIOS7 */
- ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
/* GPIOS6 */
- ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
/* GPIOS5 */
- ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
/* GPIOS4 */
- ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
/* GPIOS3*/
- ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
/* GPIOS2 */
- ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
/* GPIOS1 */
- ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
/* GPIOS0 */
- ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
/* GPIOR7 */
- ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
/* GPIOR6 */
- ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
/* GPIOR5 */
- ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
/* GPIOR4 */
- ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
/* GPIOR3*/
- ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
/* GPIOR2 */
- ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
/* GPIOR1 */
- ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
/* GPIOR0 */
- ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
/* GPIOX7 */
ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 6649357637ff3..cffeb869130dd 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2124,13 +2124,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
error = pinctrl_claim_hogs(pctldev);
if (error) {
- dev_err(pctldev->dev, "could not claim hogs: %i\n",
- error);
- pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
- pctldev->desc->npins);
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
-
+ dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
return error;
}
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index df1efc2e52025..6a94ecd6a8dea 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
- if (!propname)
- return -ENOMEM;
+ if (!propname) {
+ ret = -ENOMEM;
+ goto err;
+ }
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
if (state == 0) {
- of_node_put(np);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
break;
}
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index ac97724c59bae..4e87f5b875c0e 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -231,6 +231,7 @@ static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
/* SCORE groups */
static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const unsigned int byt_score_uart3_pins[] = { 57, 61 };
static const unsigned int byt_score_pwm0_pins[] = { 94 };
static const unsigned int byt_score_pwm1_pins[] = { 95 };
@@ -278,37 +279,38 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
static const struct intel_pingroup byt_score_groups[] = {
- PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
- PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
- PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
- PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
- PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
- PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
- PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
- PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
- PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
- PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
- PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
- PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
- PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
- PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
- PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
- PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
- PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
- PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
- PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
- PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
- PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
- PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
- PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
- PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
- PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
- PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
- PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
+ PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
+ PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
+ PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1),
+ PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
+ PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
+ PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
+ PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
+ PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
+ PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
+ PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
+ PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
+ PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
+ PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
+ PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
+ PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
+ PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
+ PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+ PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
+ PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
+ PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
+ PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
+ PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+ PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+ PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+ PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+ PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+ PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+ PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
};
static const char * const byt_score_uart_groups[] = {
- "uart1_grp", "uart2_grp",
+ "uart1_grp", "uart2_grp", "uart3_grp",
};
static const char * const byt_score_pwm_groups[] = {
"pwm0_grp", "pwm1_grp",
@@ -332,12 +334,14 @@ static const char * const byt_score_plt_clk_groups[] = {
};
static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
static const char * const byt_score_gpio_groups[] = {
- "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
- "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
- "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
- "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
- "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
- "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+ "uart1_grp_gpio", "uart2_grp_gpio", "uart3_grp_gpio", "pwm0_grp_gpio",
+ "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
+ "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
+ "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
+ "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
+ "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
+ "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
+ "plt_clk5_grp_gpio", "smbus_grp_gpio",
};
static const struct intel_function byt_score_functions[] = {
@@ -456,8 +460,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
- PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
- PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
};
static const char * const byt_sus_usb_groups[] = {
@@ -469,7 +473,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
};
static const char * const byt_sus_gpio_groups[] = {
"usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
- "pmu_clk1_grp", "pmu_clk2_grp",
+ "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
};
static const struct intel_function byt_sus_functions[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index fde65e18cd145..6981e2fab93f3 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -179,6 +179,10 @@ struct intel_community {
.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
+#define PIN_GROUP_GPIO(n, p, m) \
+ PIN_GROUP(n, p, m), \
+ PIN_GROUP(n "_gpio", p, 0)
+
#define FUNCTION(n, g) \
{ \
.func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index b6bc31abd2b06..b19bc391705ee 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
break;
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
+ if (!ret)
+ err = -EINVAL;
+ break;
+ case PIN_CONFIG_OUTPUT:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
break;
- /* CONFIG Current direction return value
- * ------------- ----------------- ----------------------
- * OUTPUT_ENABLE output 1 (= HW value)
- * input 0 (= HW value)
- * INPUT_ENABLE output 0 (= reverse HW value)
- * input 1 (= reverse HW value)
- */
- if (param == PIN_CONFIG_INPUT_ENABLE)
- ret = !ret;
+ if (!ret) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
@@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
}
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
+ if (!ret)
+ err = -EINVAL;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (!hw->soc->drive_get)
@@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
err = hw->soc->bias_set_combo(hw, desc, 0, arg);
break;
- case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
- MTK_DISABLE);
- /* Keep set direction to consider the case that a GPIO pin
- * does not have SMT control
- */
- if (err != -ENOTSUPP)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_OUTPUT);
- break;
case PIN_CONFIG_INPUT_ENABLE:
/* regard all non-zero value as enable */
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
- if (err)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_INPUT);
break;
case PIN_CONFIG_SLEW_RATE:
/* regard all non-zero value as enable */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index 79f5d753d7e1a..50a87d9618a8e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
-static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+static const unsigned int pdm_dclk_a_pins[] = { GPIOA_9 };
/* gen_clk */
static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
GROUP(pdm_din2_a, 3),
GROUP(pdm_din1_a, 3),
GROUP(pdm_din0_a, 3),
- GROUP(pdm_dclk, 3),
+ GROUP(pdm_dclk_a, 3),
GROUP(pwm_c_a, 3),
GROUP(pwm_b_a, 3),
@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
static const char * const pdm_groups[] = {
"pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
- "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
};
static const char * const gen_clk_groups[] = {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index eb5a8c6542606..20425afc6b331 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2045,7 +2045,9 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
struct irq_data *data;
+ unsigned long flags;
unsigned int virq;
+ int ret;
if (!pctrl->hwirq[i])
continue;
@@ -2063,8 +2065,18 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
continue;
}
- if (!irqd_irq_disabled(data))
+ /*
+ * This has to be atomically executed to protect against a concurrent
+ * interrupt.
+ */
+ raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+ if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
+ raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+
+ if (ret)
+ dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
}
}
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 30951f7131cd9..1accdaaf282c5 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -721,6 +721,7 @@ static struct miscdevice isst_if_char_driver = {
static const struct x86_cpu_id hpm_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
};
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
index 1305cba61edd4..aca123783efcc 100644
--- a/drivers/power/supply/mt6360_charger.c
+++ b/drivers/power/supply/mt6360_charger.c
@@ -588,7 +588,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = {
};
static const struct regulator_desc mt6360_otg_rdesc = {
- .of_match = "usb-otg-vbus",
+ .of_match = "usb-otg-vbus-regulator",
.name = "usb-otg-vbus",
.ops = &mt6360_chg_otg_ops,
.owner = THIS_MODULE,
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index c345a77f9f78c..e4dbacd50a437 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -192,6 +192,7 @@ static const int rt9455_voreg_values[] = {
4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000
};
+#if IS_ENABLED(CONFIG_USB_PHY)
/*
* When the charger is in boost mode, REG02[7:2] represent boost output
* voltage.
@@ -207,6 +208,7 @@ static const int rt9455_boost_voltage_values[] = {
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
};
+#endif
/* REG07[3:0] (VMREG) in uV */
static const int rt9455_vmreg_values[] = {
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
index fe7ae0f3f46af..5ab1a0befe12f 100644
--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -352,6 +352,9 @@ void *regulator_irq_helper(struct device *dev,
h->irq = irq;
h->desc = *d;
+ h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
+ if (!h->desc.name)
+ return ERR_PTR(-ENOMEM);
ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
rdev_amount);
diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c
index ad6587a378d09..24cc9fc94e900 100644
--- a/drivers/regulator/mt6360-regulator.c
+++ b/drivers/regulator/mt6360-regulator.c
@@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
}
-#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \
- mreg, mmask, streg, stmask, vranges, \
- vcnts, offon_delay, irq_tbls) \
+#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg, \
+ vmask, mreg, mmask, streg, stmask, \
+ vranges, vcnts, offon_delay, irq_tbls) \
{ \
.desc = { \
.name = #_name, \
.supply_name = #_sname, \
.id = MT6360_REGULATOR_##_name, \
- .of_match = of_match_ptr(#_name), \
+ .of_match = of_match_ptr(match), \
.regulators_node = of_match_ptr("regulator"), \
.of_map_mode = mt6360_regulator_of_map_mode, \
.owner = THIS_MODULE, \
@@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
static const struct mt6360_regulator_desc mt6360_regulator_descs[] = {
- MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+ MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN,
+ 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
buck_vout_ranges, 256, 0, buck1_irq_tbls),
- MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+ MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN,
+ 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
buck_vout_ranges, 256, 0, buck2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+ MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3,
+ 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
- MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+ MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3,
+ 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
- MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+ MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1,
+ 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
- MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+ MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1,
+ 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+ MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1,
+ 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
- MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+ MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2,
+ 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
};
diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
index 656fe330d38f0..063e12c08e75f 100644
--- a/drivers/regulator/qcom-refgen-regulator.c
+++ b/drivers/regulator/qcom-refgen-regulator.c
@@ -140,6 +140,7 @@ static const struct of_device_id qcom_refgen_match_table[] = {
{ .compatible = "qcom,sm8250-refgen-regulator", .data = &sm8250_refgen_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_refgen_match_table);
static struct platform_driver qcom_refgen_driver = {
.probe = qcom_refgen_probe,
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
index 086da36abc0b4..4955616517ce9 100644
--- a/drivers/regulator/vqmmc-ipq4019-regulator.c
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -84,6 +84,7 @@ static const struct of_device_id regulator_ipq4019_of_match[] = {
{ .compatible = "qcom,vqmmc-ipq4019-regulator", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_ipq4019_of_match);
static struct platform_driver ipq4019_regulator_driver = {
.probe = ipq4019_regulator_probe,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f0b8b709649f2..a3adaec5504e4 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -364,30 +364,33 @@ out:
return rc;
}
+static void qeth_free_cq(struct qeth_card *card)
+{
+ if (card->qdio.c_q) {
+ qeth_free_qdio_queue(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+ }
+}
+
static int qeth_alloc_cq(struct qeth_card *card)
{
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqon");
- card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
- dev_err(&card->gdev->dev, "Failed to create completion queue\n");
- return -ENOMEM;
+ card->qdio.c_q = qeth_alloc_qdio_queue();
+ if (!card->qdio.c_q) {
+ dev_err(&card->gdev->dev,
+ "Failed to create completion queue\n");
+ return -ENOMEM;
+ }
}
} else {
QETH_CARD_TEXT(card, 2, "nocq");
- card->qdio.c_q = NULL;
+ qeth_free_cq(card);
}
return 0;
}
-static void qeth_free_cq(struct qeth_card *card)
-{
- if (card->qdio.c_q) {
- qeth_free_qdio_queue(card->qdio.c_q);
- card->qdio.c_q = NULL;
- }
-}
-
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed)
{
@@ -2628,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "allcqdbf");
+ /* completion */
+ if (qeth_alloc_cq(card))
+ goto out_err;
+
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
@@ -2663,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
- /* completion */
- if (qeth_alloc_cq(card))
- goto out_freeoutq;
-
return 0;
out_freeoutq:
@@ -2677,6 +2680,8 @@ out_freeoutq:
qeth_free_buffer_pool(card);
out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ qeth_free_cq(card);
+out_err:
return -ENOMEM;
}
@@ -2684,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
+ qeth_free_cq(card);
+
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
- qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
@@ -3742,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
- int rc;
-
- if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
- rc = -1;
- goto out;
- } else {
- if (card->options.cq == cq) {
- rc = 0;
- goto out;
- }
-
- qeth_free_qdio_queues(card);
- card->options.cq = cq;
- rc = 0;
- }
-out:
- return rc;
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+ return -1;
+ card->options.cq = cq;
+ return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 58fdf679341dc..65cdc8b77e358 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3120,6 +3120,7 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
{
struct scsi_device *sdp = sdkp->device;
const struct scsi_io_group_descriptor *desc, *start, *end;
+ u16 permanent_stream_count_old;
struct scsi_sense_hdr sshdr;
struct scsi_mode_data data;
int res;
@@ -3140,12 +3141,13 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
for (desc = start; desc < end; desc++)
if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
break;
+ permanent_stream_count_old = sdkp->permanent_stream_count;
sdkp->permanent_stream_count = desc - start;
if (sdkp->rscs && sdkp->permanent_stream_count < 2)
sd_printk(KERN_INFO, sdkp,
"Unexpected: RSCS has been set and the permanent stream count is %u\n",
sdkp->permanent_stream_count);
- else if (sdkp->permanent_stream_count)
+ else if (sdkp->permanent_stream_count != permanent_stream_count_old)
sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
sdkp->permanent_stream_count);
}
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 50c664b65f4d4..1b7afb19ccd63 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -72,6 +72,7 @@ config MTK_SOCINFO
tristate "MediaTek SoC Information"
default y
depends on NVMEM_MTK_EFUSE
+ select SOC_BUS
help
The MediaTek SoC Information (mtk-socinfo) driver provides
information about the SoC to the userspace including the
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index c832f5c670bcf..9a91298c12539 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -1768,6 +1768,7 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
struct dev_pm_opp *opp;
+ char tz_name_buf[20];
unsigned long freq;
int count, ret;
u32 idx, i;
@@ -1819,10 +1820,12 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
}
if (!IS_ERR_OR_NULL(bdata->tzone_name)) {
- svsb->tzd = thermal_zone_get_zone_by_name(bdata->tzone_name);
+ snprintf(tz_name_buf, ARRAY_SIZE(tz_name_buf),
+ "%s-thermal", bdata->tzone_name);
+ svsb->tzd = thermal_zone_get_zone_by_name(tz_name_buf);
if (IS_ERR(svsb->tzd)) {
dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
- bdata->tzone_name);
+ tz_name_buf);
return PTR_ERR(svsb->tzd);
}
}
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 7cd24bd8e2248..6bcf8e75273c4 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -130,6 +130,19 @@ static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}
+static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
+{
+ u32 wake_ctrl;
+
+ wake_ctrl = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+ if (enable)
+ wake_ctrl |= AMD_SDW_WAKE_INTR_MASK;
+ else
+ wake_ctrl &= ~AMD_SDW_WAKE_INTR_MASK;
+
+ writel(wake_ctrl, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+}
+
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
int cmd_offset)
{
@@ -1095,6 +1108,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, false);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
/*
@@ -1121,6 +1135,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
return 0;
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, true);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
ret = amd_sdw_clock_stop(amd_manager);
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 418b679e0b1a6..707065468e05a 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -152,7 +152,7 @@
#define AMD_SDW0_EXT_INTR_MASK 0x200000
#define AMD_SDW1_EXT_INTR_MASK 4
#define AMD_SDW_IRQ_MASK_0TO7 0x77777777
-#define AMD_SDW_IRQ_MASK_8TO11 0x000d7777
+#define AMD_SDW_IRQ_MASK_8TO11 0x000c7777
#define AMD_SDW_IRQ_ERROR_MASK 0xff
#define AMD_SDW_MAX_FREQ_NUM 1
#define AMD_SDW0_MAX_TX_PORTS 3
@@ -190,6 +190,7 @@
#define AMD_SDW_CLK_RESUME_REQ 2
#define AMD_SDW_CLK_RESUME_DONE 3
#define AMD_SDW_WAKE_STAT_MASK BIT(16)
+#define AMD_SDW_WAKE_INTR_MASK BIT(16)
static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
AMD_SDW_DEFAULT_CLK_FREQ,
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index b246067e074bc..6cb96a1e8b7df 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -967,7 +967,7 @@ vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
val_u32 = __virtio32_to_cpu(true, config->size_max);
- return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, val_u32);
}
/* fill the block size*/
@@ -1089,7 +1089,7 @@ static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
u8 ro;
ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_READ_ONLY, ro))
return -EMSGSIZE;
return 0;
@@ -1100,7 +1100,7 @@ static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
u8 flush;
flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_FLUSH, flush))
return -EMSGSIZE;
return 0;
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index dae96c9f61cf8..806ecd32219b6 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -196,7 +196,7 @@ err_mutex_unlock:
*/
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
- unsigned long offset = vmf->address - vmf->vma->vm_start;
+ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 9defa12208f98..1775fcc7f0e8e 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -179,13 +179,14 @@ extern int v9fs_vfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
-extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid);
+extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid,
+ bool new);
extern const struct inode_operations v9fs_dir_inode_operations_dotl;
extern const struct inode_operations v9fs_file_inode_operations_dotl;
extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
extern const struct netfs_request_ops v9fs_req_ops;
extern struct inode *v9fs_fid_iget_dotl(struct super_block *sb,
- struct p9_fid *fid);
+ struct p9_fid *fid, bool new);
/* other default globals */
#define V9FS_PORT 564
@@ -224,12 +225,12 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
*/
static inline struct inode *
v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
+ struct super_block *sb, bool new)
{
if (v9fs_proto_dotl(v9ses))
- return v9fs_fid_iget_dotl(sb, fid);
+ return v9fs_fid_iget_dotl(sb, fid, new);
else
- return v9fs_fid_iget(sb, fid);
+ return v9fs_fid_iget(sb, fid, new);
}
#endif
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 47bd77199e20c..7a3308d776060 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -364,7 +364,8 @@ void v9fs_evict_inode(struct inode *inode)
clear_inode(inode);
}
-struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
+struct inode *
+v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid, bool new)
{
dev_t rdev;
int retval;
@@ -376,8 +377,18 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
inode = iget_locked(sb, QID2INO(&fid->qid));
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
+ if (!(inode->i_state & I_NEW)) {
+ if (!new) {
+ goto done;
+ } else {
+ p9_debug(P9_DEBUG_VFS, "WARNING: Inode collision %ld\n",
+ inode->i_ino);
+ iput(inode);
+ remove_inode_hash(inode);
+ inode = iget_locked(sb, QID2INO(&fid->qid));
+ WARN_ON(!(inode->i_state & I_NEW));
+ }
+ }
/*
* initialize the inode with the stat info
@@ -401,11 +412,11 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
+done:
return inode;
error:
iget_failed(inode);
return ERR_PTR(retval);
-
}
/**
@@ -437,8 +448,15 @@ static int v9fs_at_to_dotl_flags(int flags)
*/
static void v9fs_dec_count(struct inode *inode)
{
- if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
- drop_nlink(inode);
+ if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) {
+ if (inode->i_nlink) {
+ drop_nlink(inode);
+ } else {
+ p9_debug(P9_DEBUG_VFS,
+ "WARNING: unexpected i_nlink zero %d inode %ld\n",
+ inode->i_nlink, inode->i_ino);
+ }
+ }
}
/**
@@ -489,6 +507,9 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
} else
v9fs_dec_count(inode);
+ if (inode->i_nlink <= 0) /* no more refs, unhash it */
+ remove_inode_hash(inode);
+
v9fs_invalidate_inode_attr(inode);
v9fs_invalidate_inode_attr(dir);
@@ -554,7 +575,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
/*
* instantiate inode and assign the unopened fid to the dentry
*/
- inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS,
@@ -683,7 +704,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
else if (IS_ERR(fid))
inode = ERR_CAST(fid);
else
- inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, false);
/*
* If we had a rename on the server and a parallel lookup
* for the new name, then make sure we instantiate with
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 55dde186041a3..c61b97bd13b9a 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -52,7 +52,10 @@ static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
return current_fsgid();
}
-struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
+
+
+struct inode *
+v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid, bool new)
{
int retval;
struct inode *inode;
@@ -62,8 +65,18 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
inode = iget_locked(sb, QID2INO(&fid->qid));
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
+ if (!(inode->i_state & I_NEW)) {
+ if (!new) {
+ goto done;
+ } else { /* deal with race condition in inode number reuse */
+ p9_debug(P9_DEBUG_ERROR, "WARNING: Inode collision %lx\n",
+ inode->i_ino);
+ iput(inode);
+ remove_inode_hash(inode);
+ inode = iget_locked(sb, QID2INO(&fid->qid));
+ WARN_ON(!(inode->i_state & I_NEW));
+ }
+ }
/*
* initialize the inode with the stat info
@@ -90,12 +103,11 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
goto error;
unlock_new_inode(inode);
-
+done:
return inode;
error:
iget_failed(inode);
return ERR_PTR(retval);
-
}
struct dotl_openflag_map {
@@ -247,7 +259,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
goto out;
}
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@@ -340,7 +352,7 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
}
/* instantiate inode and assign the unopened fid to the dentry */
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
@@ -776,7 +788,7 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
err);
goto error;
}
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 55e67e36ae682..f52fdf42945cf 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -139,7 +139,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
else
sb->s_d_op = &v9fs_dentry_operations;
- inode = v9fs_get_inode_from_fid(v9ses, fid, sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, sb, true);
if (IS_ERR(inode)) {
retval = PTR_ERR(inode);
goto release_sb;
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index c60794264da28..45cb8149d374c 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -57,13 +57,14 @@ static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_n
bp->v.seq = cpu_to_le64(f->cookie);
bp->v.sectors_written = 0;
bp->v.flags = 0;
+ bp->v.sectors_written = cpu_to_le16(f->sectors_written);
bp->v.min_key = f->min_key;
SET_BTREE_PTR_RANGE_UPDATED(&bp->v, f->range_updated);
memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs);
}
static bool found_btree_node_is_readable(struct btree_trans *trans,
- const struct found_btree_node *f)
+ struct found_btree_node *f)
{
struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } k;
@@ -71,8 +72,10 @@ static bool found_btree_node_is_readable(struct btree_trans *trans,
struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false);
bool ret = !IS_ERR_OR_NULL(b);
- if (ret)
+ if (ret) {
+ f->sectors_written = b->written;
six_unlock_read(&b->c.lock);
+ }
/*
* We might update this node's range; if that happens, we need the node
diff --git a/fs/bcachefs/btree_node_scan_types.h b/fs/bcachefs/btree_node_scan_types.h
index abb7b27d556a9..5cfaeb5ac831b 100644
--- a/fs/bcachefs/btree_node_scan_types.h
+++ b/fs/bcachefs/btree_node_scan_types.h
@@ -9,6 +9,7 @@ struct found_btree_node {
bool overwritten:1;
u8 btree_id;
u8 level;
+ unsigned sectors_written;
u32 seq;
u64 cookie;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 941401a210f56..82f179258867b 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -525,7 +525,6 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
- BUG();
ret = -EIO;
goto err;
}
@@ -629,7 +628,6 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
bch2_data_type_str(ptr_data_type),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- BUG();
ret = -EIO;
goto err;
}
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index ca4a066e9a542..0f95d7fb5ec0b 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -606,7 +606,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
struct bkey_s new,
unsigned flags)
{
- s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
+ s64 nr = (s64) bkey_is_inode(new.k) - (s64) bkey_is_inode(old.k);
if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (nr) {
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 8aff1a724805a..62da538d91cbd 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -151,7 +151,7 @@ static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
if (WARN_ON(len == 0))
source = NETFS_INVALID_READ;
if (source != NETFS_READ_FROM_CACHE) {
- erofs_err(NULL, "prepare_read failed (source %d)", source);
+ erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source);
return -EIO;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 39c67119f43bf..d28ccfc0352b1 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -84,13 +84,6 @@ struct erofs_dev_context {
bool flatdev;
};
-struct erofs_fs_context {
- struct erofs_mount_opts opt;
- struct erofs_dev_context *devs;
- char *fsid;
- char *domain_id;
-};
-
/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
/* # of pages needed for EROFS lz4 rolling decompression */
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c0eb139adb07a..30b49b2eee534 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -370,18 +370,18 @@ out:
return ret;
}
-static void erofs_default_options(struct erofs_fs_context *ctx)
+static void erofs_default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- ctx->opt.max_sync_decompress_pages = 3;
- ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
+ sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ sbi->opt.max_sync_decompress_pages = 3;
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
#endif
}
@@ -426,16 +426,16 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
switch (mode) {
case EROFS_MOUNT_DAX_ALWAYS:
- set_opt(&ctx->opt, DAX_ALWAYS);
- clear_opt(&ctx->opt, DAX_NEVER);
+ set_opt(&sbi->opt, DAX_ALWAYS);
+ clear_opt(&sbi->opt, DAX_NEVER);
return true;
case EROFS_MOUNT_DAX_NEVER:
- set_opt(&ctx->opt, DAX_NEVER);
- clear_opt(&ctx->opt, DAX_ALWAYS);
+ set_opt(&sbi->opt, DAX_NEVER);
+ clear_opt(&sbi->opt, DAX_ALWAYS);
return true;
default:
DBG_BUGON(1);
@@ -450,7 +450,7 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
static int erofs_fc_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
struct fs_parse_result result;
struct erofs_device_info *dif;
int opt, ret;
@@ -463,9 +463,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
if (result.boolean)
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
else
- clear_opt(&ctx->opt, XATTR_USER);
+ clear_opt(&sbi->opt, XATTR_USER);
#else
errorfc(fc, "{,no}user_xattr options not supported");
#endif
@@ -473,16 +473,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
if (result.boolean)
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
else
- clear_opt(&ctx->opt, POSIX_ACL);
+ clear_opt(&sbi->opt, POSIX_ACL);
#else
errorfc(fc, "{,no}acl options not supported");
#endif
break;
case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = result.uint_32;
+ sbi->opt.cache_strategy = result.uint_32;
#else
errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
@@ -504,27 +504,27 @@ static int erofs_fc_parse_param(struct fs_context *fc,
kfree(dif);
return -ENOMEM;
}
- down_write(&ctx->devs->rwsem);
- ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
- up_write(&ctx->devs->rwsem);
+ down_write(&sbi->devs->rwsem);
+ ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
+ up_write(&sbi->devs->rwsem);
if (ret < 0) {
kfree(dif->path);
kfree(dif);
return ret;
}
- ++ctx->devs->extra_devices;
+ ++sbi->devs->extra_devices;
break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
case Opt_fsid:
- kfree(ctx->fsid);
- ctx->fsid = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->fsid)
+ kfree(sbi->fsid);
+ sbi->fsid = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->fsid)
return -ENOMEM;
break;
case Opt_domain_id:
- kfree(ctx->domain_id);
- ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->domain_id)
+ kfree(sbi->domain_id);
+ sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->domain_id)
return -ENOMEM;
break;
#else
@@ -581,8 +581,7 @@ static const struct export_operations erofs_export_ops = {
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
- struct erofs_sb_info *sbi;
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
int err;
sb->s_magic = EROFS_SUPER_MAGIC;
@@ -590,19 +589,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_op = &erofs_sops;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
-
- sb->s_fs_info = sbi;
- sbi->opt = ctx->opt;
- sbi->devs = ctx->devs;
- ctx->devs = NULL;
- sbi->fsid = ctx->fsid;
- ctx->fsid = NULL;
- sbi->domain_id = ctx->domain_id;
- ctx->domain_id = NULL;
-
sbi->blkszbits = PAGE_SHIFT;
if (erofs_is_fscache_mode(sb)) {
sb->s_blocksize = PAGE_SIZE;
@@ -706,9 +692,9 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
static int erofs_fc_get_tree(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
- if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
return get_tree_nodev(fc, erofs_fc_fill_super);
return get_tree_bdev(fc, erofs_fc_fill_super);
@@ -718,19 +704,19 @@ static int erofs_fc_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *new_sbi = fc->s_fs_info;
DBG_BUGON(!sb_rdonly(sb));
- if (ctx->fsid || ctx->domain_id)
+ if (new_sbi->fsid || new_sbi->domain_id)
erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
- if (test_opt(&ctx->opt, POSIX_ACL))
+ if (test_opt(&new_sbi->opt, POSIX_ACL))
fc->sb_flags |= SB_POSIXACL;
else
fc->sb_flags &= ~SB_POSIXACL;
- sbi->opt = ctx->opt;
+ sbi->opt = new_sbi->opt;
fc->sb_flags |= SB_RDONLY;
return 0;
@@ -761,12 +747,15 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
static void erofs_fc_free(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
- erofs_free_dev_context(ctx->devs);
- kfree(ctx->fsid);
- kfree(ctx->domain_id);
- kfree(ctx);
+ if (!sbi)
+ return;
+
+ erofs_free_dev_context(sbi->devs);
+ kfree(sbi->fsid);
+ kfree(sbi->domain_id);
+ kfree(sbi);
}
static const struct fs_context_operations erofs_context_ops = {
@@ -778,38 +767,35 @@ static const struct fs_context_operations erofs_context_ops = {
static int erofs_init_fs_context(struct fs_context *fc)
{
- struct erofs_fs_context *ctx;
+ struct erofs_sb_info *sbi;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
return -ENOMEM;
- ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
- if (!ctx->devs) {
- kfree(ctx);
+
+ sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
+ if (!sbi->devs) {
+ kfree(sbi);
return -ENOMEM;
}
- fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
- idr_init(&ctx->devs->tree);
- init_rwsem(&ctx->devs->rwsem);
- erofs_default_options(ctx);
+ idr_init(&sbi->devs->tree);
+ init_rwsem(&sbi->devs->rwsem);
+ erofs_default_options(sbi);
fc->ops = &erofs_context_ops;
return 0;
}
static void erofs_kill_sb(struct super_block *sb)
{
- struct erofs_sb_info *sbi;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
- if (erofs_is_fscache_mode(sb))
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
kill_anon_super(sb);
else
kill_block_super(sb);
- sbi = EROFS_SB(sb);
- if (!sbi)
- return;
-
erofs_free_dev_context(sbi->devs);
fs_put_dax(sbi->dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 1d5abfdf0f22a..fb0628e680c40 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -769,7 +769,7 @@ static int ioctl_getfsuuid(struct file *file, void __user *argp)
struct fsuuid2 u = { .len = sb->s_uuid_len, };
if (!sb->s_uuid_len)
- return -ENOIOCTLCMD;
+ return -ENOTTY;
memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len);
@@ -781,7 +781,7 @@ static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp)
struct super_block *sb = file_inode(file)->i_sb;
if (!strlen(sb->s_sysfs_name))
- return -ENOIOCTLCMD;
+ return -ENOTTY;
struct fs_sysfs_path u = {};
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 9a0d32e4b422a..267b622d923b1 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -164,7 +164,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
enum netfs_how_to_modify howto;
enum netfs_folio_trace trace;
unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
- ssize_t written = 0, ret;
+ ssize_t written = 0, ret, ret2;
loff_t i_size, pos = iocb->ki_pos, from, to;
size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
bool maybe_trouble = false;
@@ -172,15 +172,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
) {
- if (pos < i_size_read(inode)) {
- ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
- if (ret < 0) {
- goto out;
- }
- }
-
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+ ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
+ if (ret < 0) {
+ wbc_detach_inode(&wbc);
+ goto out;
+ }
+
wreq = netfs_begin_writethrough(iocb, iter->count);
if (IS_ERR(wreq)) {
wbc_detach_inode(&wbc);
@@ -395,10 +394,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
out:
if (unlikely(wreq)) {
- ret = netfs_end_writethrough(wreq, iocb);
+ ret2 = netfs_end_writethrough(wreq, iocb);
wbc_detach_inode(&wbc);
- if (ret == -EIOCBQUEUED)
- return ret;
+ if (ret2 == -EIOCBQUEUED)
+ return ret2;
+ if (ret == 0)
+ ret = ret2;
}
iocb->ki_pos += written;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c709c296ea9a4..acef52ecb1bb7 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2429,7 +2429,12 @@ static int nfs_net_init(struct net *net)
struct nfs_net *nn = net_generic(net, nfs_net_id);
nfs_clients_init(net);
- rpc_proc_register(net, &nn->rpcstats);
+
+ if (!rpc_proc_register(net, &nn->rpcstats)) {
+ nfs_clients_exit(net);
+ return -ENOMEM;
+ }
+
return nfs_fs_proc_net_init(net);
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 1955481832e03..a644460f3a5e7 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3515,6 +3515,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
args.exp = exp;
args.dentry = dentry;
args.ignore_crossmnt = (ignore_crossmnt != 0);
+ args.acl = NULL;
/*
* Make a local copy of the attribute bitmap that can be modified.
@@ -3573,7 +3574,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
} else
args.fhp = fhp;
- args.acl = NULL;
if (attrmask[0] & FATTR4_WORD0_ACL) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
if (err == -EOPNOTSUPP)
diff --git a/fs/ntfs3/Kconfig b/fs/ntfs3/Kconfig
index cdfdf51e55d79..7bc31d69f680d 100644
--- a/fs/ntfs3/Kconfig
+++ b/fs/ntfs3/Kconfig
@@ -46,3 +46,12 @@ config NTFS3_FS_POSIX_ACL
NOTE: this is linux only feature. Windows will ignore these ACLs.
If you don't know what Access Control Lists are, say N.
+
+config NTFS_FS
+ tristate "NTFS file system support"
+ select NTFS3_FS
+ select BUFFER_HEAD
+ select NLS
+ help
+ This config option is here only for backward compatibility. NTFS
+ filesystem is now handled by the NTFS3 driver.
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 5cf3d9decf646..263635199b60d 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -616,4 +616,11 @@ const struct file_operations ntfs_dir_operations = {
.compat_ioctl = ntfs_compat_ioctl,
#endif
};
+
+const struct file_operations ntfs_legacy_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = ntfs_readdir,
+ .open = ntfs_file_open,
+};
// clang-format on
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 5418662c80d88..b73969e05052a 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -1236,4 +1236,12 @@ const struct file_operations ntfs_file_operations = {
.fallocate = ntfs_fallocate,
.release = ntfs_file_release,
};
+
+const struct file_operations ntfs_legacy_file_operations = {
+ .llseek = generic_file_llseek,
+ .read_iter = ntfs_file_read_iter,
+ .splice_read = ntfs_file_splice_read,
+ .open = ntfs_file_open,
+ .release = ntfs_file_release,
+};
// clang-format on
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index eb7a8c9fba018..d273eda1cf45d 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -440,7 +440,10 @@ end_enum:
* Usually a hard links to directories are disabled.
*/
inode->i_op = &ntfs_dir_inode_operations;
- inode->i_fop = &ntfs_dir_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_dir_operations;
+ else
+ inode->i_fop = &ntfs_dir_operations;
ni->i_valid = 0;
} else if (S_ISLNK(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
@@ -450,7 +453,10 @@ end_enum:
} else if (S_ISREG(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
inode->i_op = &ntfs_file_inode_operations;
- inode->i_fop = &ntfs_file_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_file_operations;
+ else
+ inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
if (ino != MFT_REC_MFT)
@@ -1614,7 +1620,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
if (S_ISDIR(mode)) {
inode->i_op = &ntfs_dir_inode_operations;
- inode->i_fop = &ntfs_dir_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_dir_operations;
+ else
+ inode->i_fop = &ntfs_dir_operations;
} else if (S_ISLNK(mode)) {
inode->i_op = &ntfs_link_inode_operations;
inode->i_fop = NULL;
@@ -1623,7 +1632,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
inode_nohighmem(inode);
} else if (S_ISREG(mode)) {
inode->i_op = &ntfs_file_inode_operations;
- inode->i_fop = &ntfs_file_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_file_operations;
+ else
+ inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
init_rwsem(&ni->file.run_lock);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index ea5b5e814e634..c018fad4c0372 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -493,6 +493,7 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
struct ntfs_fnd *fnd);
bool dir_is_empty(struct inode *dir);
extern const struct file_operations ntfs_dir_operations;
+extern const struct file_operations ntfs_legacy_dir_operations;
/* Globals from file.c */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
@@ -507,6 +508,7 @@ long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg);
extern const struct inode_operations ntfs_special_inode_operations;
extern const struct inode_operations ntfs_file_inode_operations;
extern const struct file_operations ntfs_file_operations;
+extern const struct file_operations ntfs_legacy_file_operations;
/* Globals from frecord.c */
void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
@@ -1154,4 +1156,6 @@ static inline void le64_sub_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) - val);
}
+bool is_legacy_ntfs(struct super_block *sb);
+
#endif /* _LINUX_NTFS3_NTFS_FS_H */
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 71dfeb0c43238..f41e01c5676ab 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -408,6 +408,12 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
struct ntfs_mount_options *new_opts = fc->fs_private;
int ro_rw;
+ /* If ntfs3 is used as legacy ntfs, enforce read-only mode. */
+ if (is_legacy_ntfs(sb)) {
+ fc->sb_flags |= SB_RDONLY;
+ goto out;
+ }
+
ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
errorf(fc,
@@ -427,8 +433,6 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
fc,
"ntfs3: Cannot use different iocharset when remounting!");
- sync_filesystem(sb);
-
if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
!new_opts->force) {
errorf(fc,
@@ -436,6 +440,8 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
return -EINVAL;
}
+out:
+ sync_filesystem(sb);
swap(sbi->options, fc->fs_private);
return 0;
@@ -1613,6 +1619,8 @@ load_root:
}
#endif
+ if (is_legacy_ntfs(sb))
+ sb->s_flags |= SB_RDONLY;
return 0;
put_inode_out:
@@ -1730,7 +1738,7 @@ static const struct fs_context_operations ntfs_context_ops = {
* This will called when mount/remount. We will first initialize
* options so that if remount we can use just that.
*/
-static int ntfs_init_fs_context(struct fs_context *fc)
+static int __ntfs_init_fs_context(struct fs_context *fc)
{
struct ntfs_mount_options *opts;
struct ntfs_sb_info *sbi;
@@ -1778,6 +1786,11 @@ free_opts:
return -ENOMEM;
}
+static int ntfs_init_fs_context(struct fs_context *fc)
+{
+ return __ntfs_init_fs_context(fc);
+}
+
static void ntfs3_kill_sb(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
@@ -1798,6 +1811,50 @@ static struct file_system_type ntfs_fs_type = {
.kill_sb = ntfs3_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
+
+#if IS_ENABLED(CONFIG_NTFS_FS)
+static int ntfs_legacy_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+
+ ret = __ntfs_init_fs_context(fc);
+ /* If ntfs3 is used as legacy ntfs, enforce read-only mode. */
+ fc->sb_flags |= SB_RDONLY;
+ return ret;
+}
+
+static struct file_system_type ntfs_legacy_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ntfs",
+ .init_fs_context = ntfs_legacy_init_fs_context,
+ .parameters = ntfs_fs_parameters,
+ .kill_sb = ntfs3_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+};
+MODULE_ALIAS_FS("ntfs");
+
+static inline void register_as_ntfs_legacy(void)
+{
+ int err = register_filesystem(&ntfs_legacy_fs_type);
+ if (err)
+ pr_warn("ntfs3: Failed to register legacy ntfs filesystem driver: %d\n", err);
+}
+
+static inline void unregister_as_ntfs_legacy(void)
+{
+ unregister_filesystem(&ntfs_legacy_fs_type);
+}
+bool is_legacy_ntfs(struct super_block *sb)
+{
+ return sb->s_type == &ntfs_legacy_fs_type;
+}
+#else
+static inline void register_as_ntfs_legacy(void) {}
+static inline void unregister_as_ntfs_legacy(void) {}
+bool is_legacy_ntfs(struct super_block *sb) { return false; }
+#endif
+
+
// clang-format on
static int __init init_ntfs_fs(void)
@@ -1832,6 +1889,7 @@ static int __init init_ntfs_fs(void)
goto out1;
}
+ register_as_ntfs_legacy();
err = register_filesystem(&ntfs_fs_type);
if (err)
goto out;
@@ -1849,6 +1907,7 @@ static void __exit exit_ntfs_fs(void)
rcu_barrier();
kmem_cache_destroy(ntfs_inode_cachep);
unregister_filesystem(&ntfs_fs_type);
+ unregister_as_ntfs_legacy();
ntfs3_exit_bitmap();
#ifdef CONFIG_PROC_FS
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 195b077c0facb..9223856c934b4 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -67,7 +67,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
*/
ppage = pfn_to_online_page(pfn);
- if (!ppage || PageSlab(ppage) || page_has_type(ppage))
+ if (!ppage)
pcount = 0;
else
pcount = page_mapcount(ppage);
@@ -124,11 +124,8 @@ u64 stable_page_flags(struct page *page)
/*
* pseudo flags for the well known (anonymous) memory mapped pages
- *
- * Note that page->_mapcount is overloaded in SLAB, so the
- * simple test in page_mapped() is not enough.
*/
- if (!PageSlab(page) && page_mapped(page))
+ if (page_mapped(page))
u |= 1 << KPF_MMAP;
if (PageAnon(page))
u |= 1 << KPF_ANON;
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index c0513fbb8a59d..c46d418c1c0c3 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -882,7 +882,7 @@ typedef struct smb_com_open_rsp {
__u8 OplockLevel;
__u16 Fid;
__le32 CreateAction;
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -2266,7 +2266,7 @@ typedef struct {
/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
/******************************************************************************/
typedef struct { /* data block encoding of response to level 263 QPathInfo */
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index c72a3b2886b7f..2fccf0d4f53d2 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -320,7 +320,7 @@ struct smb2_file_reparse_point_info {
} __packed;
struct smb2_file_network_open_info {
- struct_group(network_open_info,
+ struct_group_attr(network_open_info, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 994d701934329..ddf1a3aafee5c 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -909,12 +909,15 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
+ spin_unlock(&server->mid_lock);
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
+ goto sync_mid_done;
}
spin_unlock(&server->mid_lock);
+sync_mid_done:
release_mid(mid);
return rc;
}
@@ -1057,9 +1060,11 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
index = (uint)atomic_inc_return(&ses->chan_seq);
index %= ses->chan_count;
}
+
+ server = ses->chans[index].server;
spin_unlock(&ses->chan_lock);
- return ses->chans[index].server;
+ return server;
}
int
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 272e4e79e15c4..861c3bfc5f170 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -221,7 +221,18 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
+#else
+static inline bool cpu_mitigations_off(void)
+{
+ return true;
+}
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return false;
+}
+#endif
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7a27f19bf44d0..d5fea03cb6e61 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1001,6 +1001,7 @@ bool bpf_jit_supports_exceptions(void);
bool bpf_jit_supports_ptr_xchg(void);
bool bpf_jit_supports_arena(void);
bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 5c28298a98bec..366243ee96096 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -10,6 +10,7 @@
#define __QCOM_QSEECOM_H
#include <linux/auxiliary_bus.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
@@ -25,11 +26,56 @@ struct qseecom_client {
};
/**
+ * qseecom_scm_dev() - Get the SCM device associated with the QSEECOM client.
+ * @client: The QSEECOM client device.
+ *
+ * Returns the SCM device under which the provided QSEECOM client device
+ * operates. This function is intended to be used for DMA allocations.
+ */
+static inline struct device *qseecom_scm_dev(struct qseecom_client *client)
+{
+ return client->aux_dev.dev.parent->parent;
+}
+
+/**
+ * qseecom_dma_alloc() - Allocate DMA memory for a QSEECOM client.
+ * @client: The QSEECOM client to allocate the memory for.
+ * @size: The number of bytes to allocate.
+ * @dma_handle: Pointer to where the DMA address should be stored.
+ * @gfp: Allocation flags.
+ *
+ * Wrapper function for dma_alloc_coherent(), allocating DMA memory usable for
+ * TZ/QSEECOM communication. Refer to dma_alloc_coherent() for details.
+ */
+static inline void *qseecom_dma_alloc(struct qseecom_client *client, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ return dma_alloc_coherent(qseecom_scm_dev(client), size, dma_handle, gfp);
+}
+
+/**
+ * qseecom_dma_free() - Free QSEECOM DMA memory.
+ * @client: The QSEECOM client for which the memory has been allocated.
+ * @size: The number of bytes allocated.
+ * @cpu_addr: Virtual memory address to free.
+ * @dma_handle: DMA memory address to free.
+ *
+ * Wrapper function for dma_free_coherent(), freeing memory previously
+ * allocated with qseecom_dma_alloc(). Refer to dma_free_coherent() for
+ * details.
+ */
+static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_coherent(qseecom_scm_dev(client), size, cpu_addr, dma_handle);
+}
+
+/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given client and read
@@ -43,8 +89,9 @@ struct qseecom_client {
*
* Return: Zero on success, nonzero on failure.
*/
-static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
- void *rsp, size_t rsp_size)
+static inline int qcom_qseecom_app_send(struct qseecom_client *client,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
}
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index ccaf288460546..aaa19f93ac430 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -118,8 +118,8 @@ bool qcom_scm_lmh_dcvsh_available(void);
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size);
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size);
#else /* CONFIG_QCOM_QSEECOM */
@@ -128,9 +128,9 @@ static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
return -EINVAL;
}
-static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
- size_t req_size, void *rsp,
- size_t rsp_size)
+static inline int qcom_scm_qseecom_app_send(u32 app_id,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return -EINVAL;
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7b0ee64225de9..b6bdaa18b9e9d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1223,14 +1223,16 @@ static inline void page_mapcount_reset(struct page *page)
* a large folio, it includes the number of times this page is mapped
* as part of that folio.
*
- * The result is undefined for pages which cannot be mapped into userspace.
- * For example SLAB or special types of pages. See function page_has_type().
- * They use this field in struct page differently.
+ * Will report 0 for pages which cannot be mapped into userspace, e.g.
+ * slab, page tables and similar.
*/
static inline int page_mapcount(struct page *page)
{
int mapcount = atomic_read(&page->_mapcount) + 1;
+ /* Handle page_has_type() pages */
+ if (mapcount < 0)
+ mapcount = 0;
if (unlikely(PageCompound(page)))
mapcount += folio_entire_mapcount(page_folio(page));
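With PG_hugetlb and friends living in the page type field (see the page-flags.h hunk below), a typed page keeps a negative pattern where _mapcount normally lives, so the "+ 1" read above comes out negative and is now reported as 0. A simplified userspace model of that clamp follows; the struct and constant are stand-ins, not the real struct page.

/* Simplified model of the page_mapcount() clamp: a typed page (e.g. a
 * page-table page) keeps a negative pattern in _mapcount, so the "+ 1"
 * read is negative and is reported as 0 instead of garbage.
 */
#include <stdio.h>
#include <stdint.h>

#define PG_table 0x00000200

struct fake_page {
	int32_t _mapcount;	/* -1 for a freshly initialised page */
};

static int page_mapcount(const struct fake_page *page)
{
	int mapcount = page->_mapcount + 1;

	/* Handle page_has_type() pages */
	if (mapcount < 0)
		mapcount = 0;
	return mapcount;
}

int main(void)
{
	struct fake_page mapped  = { ._mapcount = 2 };			/* mapped 3 times */
	struct fake_page pgtable = { ._mapcount = -1 & ~PG_table };	/* like __SetPageTable() */

	printf("mapped page:     %d\n", page_mapcount(&mapped));	/* 3 */
	printf("page-table page: %d\n", page_mapcount(&pgtable));	/* 0 */
	return 0;
}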
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 652d77805e99d..4bf1c25fd1dc5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -190,7 +190,6 @@ enum pageflags {
/* At least one page in this folio has the hwpoison flag set */
PG_has_hwpoisoned = PG_error,
- PG_hugetlb = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
};
@@ -458,30 +457,51 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
+#define FOLIO_TEST_FLAG_FALSE(name) \
+static inline bool folio_test_##name(const struct folio *folio) \
+{ return false; }
+#define FOLIO_SET_FLAG_NOOP(name) \
+static inline void folio_set_##name(struct folio *folio) { }
+#define FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void folio_clear_##name(struct folio *folio) { }
+#define __FOLIO_SET_FLAG_NOOP(name) \
+static inline void __folio_set_##name(struct folio *folio) { }
+#define __FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void __folio_clear_##name(struct folio *folio) { }
+#define FOLIO_TEST_SET_FLAG_FALSE(name) \
+static inline bool folio_test_set_##name(struct folio *folio) \
+{ return false; }
+#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
+static inline bool folio_test_clear_##name(struct folio *folio) \
+{ return false; }
+
+#define FOLIO_FLAG_FALSE(name) \
+FOLIO_TEST_FLAG_FALSE(name) \
+FOLIO_SET_FLAG_NOOP(name) \
+FOLIO_CLEAR_FLAG_NOOP(name)
+
#define TESTPAGEFLAG_FALSE(uname, lname) \
-static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
+FOLIO_TEST_FLAG_FALSE(lname) \
static inline int Page##uname(const struct page *page) { return 0; }
#define SETPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_set_##lname(struct folio *folio) { } \
+FOLIO_SET_FLAG_NOOP(lname) \
static inline void SetPage##uname(struct page *page) { }
#define CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_clear_##lname(struct folio *folio) { } \
+FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void ClearPage##uname(struct page *page) { }
#define __CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void __folio_clear_##lname(struct folio *folio) { } \
+__FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void __ClearPage##uname(struct page *page) { }
#define TESTSETFLAG_FALSE(uname, lname) \
-static inline bool folio_test_set_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_SET_FLAG_FALSE(lname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }
#define TESTCLEARFLAG_FALSE(uname, lname) \
-static inline bool folio_test_clear_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }
#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
@@ -855,29 +875,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#define PG_head_mask ((1UL << PG_head))
-#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(const struct page *page);
-SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-
-/**
- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
- * @folio: The folio to test.
- *
- * Context: Any context. Caller should have a reference on the folio to
- * prevent it from being turned into a tail page.
- * Return: True for hugetlbfs folios, false for anon folios or folios
- * belonging to other filesystems.
- */
-static inline bool folio_test_hugetlb(const struct folio *folio)
-{
- return folio_test_large(folio) &&
- test_bit(PG_hugetlb, const_folio_flags(folio, 1));
-}
-#else
-TESTPAGEFLAG_FALSE(Huge, hugetlb)
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -934,33 +931,22 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
- * Check if a page is currently marked HWPoisoned. Note that this check is
- * best effort only and inherently racy: there is no way to synchronize with
- * failing hardware.
- */
-static inline bool is_page_hwpoison(struct page *page)
-{
- if (PageHWPoison(page))
- return true;
- return PageHuge(page) && PageHWPoison(compound_head(page));
-}
-
-/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
* sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
* __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of page_mapcount() won't be
+ * low bits so that an underflow or overflow of _mapcount won't be
* mistaken for a page type value.
*/
#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of page_mapcount */
+/* Reserve 0x0000007f to catch underflows of _mapcount */
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_offline 0x00000100
#define PG_table 0x00000200
#define PG_guard 0x00000400
+#define PG_hugetlb 0x00000800
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -977,35 +963,38 @@ static inline int page_has_type(const struct page *page)
return page_type_has_type(page->page_type);
}
+#define FOLIO_TYPE_OPS(lname, fname) \
+static __always_inline bool folio_test_##fname(const struct folio *folio)\
+{ \
+ return folio_test_type(folio, PG_##lname); \
+} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+ folio->page.page_type &= ~PG_##lname; \
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type |= PG_##lname; \
+}
+
#define PAGE_TYPE_OPS(uname, lname, fname) \
+FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
return PageType(page, PG_##lname); \
} \
-static __always_inline int folio_test_##fname(const struct folio *folio)\
-{ \
- return folio_test_type(folio, PG_##lname); \
-} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!PageType(page, 0), page); \
page->page_type &= ~PG_##lname; \
} \
-static __always_inline void __folio_set_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
-} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type |= PG_##lname; \
-} \
-static __always_inline void __folio_clear_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
-} \
+}
/*
* PageBuddy() indicates that the page is free and in the buddy system
@@ -1052,6 +1041,37 @@ PAGE_TYPE_OPS(Table, table, pgtable)
*/
PAGE_TYPE_OPS(Guard, guard, guard)
+#ifdef CONFIG_HUGETLB_PAGE
+FOLIO_TYPE_OPS(hugetlb, hugetlb)
+#else
+FOLIO_TEST_FLAG_FALSE(hugetlb)
+#endif
+
+/**
+ * PageHuge - Determine if the page belongs to hugetlbfs
+ * @page: The page to test.
+ *
+ * Context: Any context.
+ * Return: True for hugetlbfs pages, false for anon pages or pages
+ * belonging to other filesystems.
+ */
+static inline bool PageHuge(const struct page *page)
+{
+ return folio_test_hugetlb(page_folio(page));
+}
+
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
+{
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
+}
+
extern bool is_free_buddy_page(struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1118,7 +1138,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
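PG_hugetlb moves from a folio flag to a page type here, and FOLIO_TYPE_OPS() inherits the inverted encoding described in the comment above: "setting" a type clears its bit in the all-ones page_type field, and the test checks that the PAGE_TYPE_BASE pattern is still intact. A toy standalone sketch using the constants from this hunk:

#include <stdio.h>
#include <stdint.h>

#define PAGE_TYPE_BASE	0xf0000000u
#define PG_hugetlb	0x00000800u

/* Mirrors PageType()/folio_test_type(): the type is "set" when its bit is
 * clear and the high PAGE_TYPE_BASE pattern is untouched. */
static int page_type_has(uint32_t page_type, uint32_t flag)
{
	return (page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE;
}

int main(void)
{
	uint32_t page_type = 0xffffffffu;	/* freshly initialised: no type */

	printf("hugetlb set? %d\n", page_type_has(page_type, PG_hugetlb));	/* 0 */

	page_type &= ~PG_hugetlb;		/* like __folio_set_hugetlb() */
	printf("hugetlb set? %d\n", page_type_has(page_type, PG_hugetlb));	/* 1 */

	page_type |= PG_hugetlb;		/* like __folio_clear_hugetlb() */
	printf("hugetlb set? %d\n", page_type_has(page_type, PG_hugetlb));	/* 0 */
	return 0;
}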
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 11db1ec516e27..04ae5ebcb637a 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -18,13 +18,8 @@ struct proc_dir_entry;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
-void create_prof_cpu_mask(void);
int create_proc_profile(void);
#else
-static inline void create_prof_cpu_mask(void)
-{
-}
-
static inline int create_proc_profile(void)
{
return 0;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 4660582a33022..ed180ca419dad 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -320,13 +320,13 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
static inline int devm_regulator_get_enable(struct device *dev, const char *id)
{
- return -ENODEV;
+ return 0;
}
static inline int devm_regulator_get_enable_optional(struct device *dev,
const char *id)
{
- return -ENODEV;
+ return 0;
}
static inline struct regulator *__must_check
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 9c8dd4c014120..3f3246a6a6fb4 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -465,10 +465,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
+ read_lock_bh(&sk->sk_callback_lock);
if (psock->saved_data_ready)
psock->saved_data_ready(sk);
else
sk->sk_data_ready(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void psock_set_prog(struct bpf_prog **pprog,
diff --git a/include/net/gro.h b/include/net/gro.h
index 50f1e403dbbb3..c1d4ca0463a17 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -87,6 +87,15 @@ struct napi_gro_cb {
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
+
+ /* L3 offsets */
+ union {
+ struct {
+ u16 network_offset;
+ u16 inner_network_offset;
+ };
+ u16 network_offsets[2];
+ };
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
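The two named offsets and the two-element array alias the same storage, so GRO code can either name the field it means or index by encapsulation level, as the udp4/udp6 lookup changes later in this diff do with skb->encapsulation. A small standalone sketch of that layout, with a hypothetical struct name:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the offsets union added to struct napi_gro_cb. */
struct gro_offsets {
	union {
		struct {
			uint16_t network_offset;
			uint16_t inner_network_offset;
		};
		uint16_t network_offsets[2];
	};
};

int main(void)
{
	struct gro_offsets cb = { 0 };
	int encapsulation = 1;		/* pretend one level of tunnelling was seen */

	cb.network_offset = 14;		/* outer L3 header after Ethernet */
	cb.inner_network_offset = 64;	/* inner L3 header behind the tunnel */

	/* same field either way: index 0 aliases network_offset, 1 the inner one */
	printf("chosen offset: %u\n", cb.network_offsets[encapsulation]);
	return 0;
}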
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index d801409b33cfe..d55e53ac91bd2 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -135,6 +135,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
#define __def_pagetype_names \
+ DEF_PAGETYPE_NAME(hugetlb), \
DEF_PAGETYPE_NAME(offline), \
DEF_PAGETYPE_NAME(guard), \
DEF_PAGETYPE_NAME(table), \
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index d87410a8443aa..af024d90453dd 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -77,11 +77,6 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
-#define ETNAVIV_PARAM_GPU_NN_CORE_COUNT 0x1f
-#define ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE 0x20
-#define ETNAVIV_PARAM_GPU_TP_CORE_COUNT 0x21
-#define ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE 0x22
-#define ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE 0x23
#define ETNA_MAX_PIPES 4
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 43c51698195ce..842bf1201ac41 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -57,7 +57,7 @@ enum vdpa_attr {
VDPA_ATTR_DEV_FEATURES, /* u64 */
VDPA_ATTR_DEV_BLK_CFG_CAPACITY, /* u64 */
- VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, /* u32 */
+ VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, /* u16 */
@@ -70,8 +70,8 @@ enum vdpa_attr {
VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN,/* u32 */
VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, /* u32 */
VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, /* u32 */
- VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, /* u8 */
- VDPA_ATTR_DEV_BLK_CFG_FLUSH, /* u8 */
+ VDPA_ATTR_DEV_BLK_READ_ONLY, /* u8 */
+ VDPA_ATTR_DEV_BLK_FLUSH, /* u8 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
diff --git a/init/Kconfig b/init/Kconfig
index aa02aec6aa7d2..664bedb9a71fb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1899,11 +1899,11 @@ config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
+ depends on !CFI_CLANG
depends on !MODVERSIONS
depends on !GCC_PLUGINS
depends on !RANDSTRUCT
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
- select CONSTRUCTORS
help
Enables Rust support in the kernel.
diff --git a/kernel/bounds.c b/kernel/bounds.c
index c5a9fcd2d6228..29b2cd00df2cc 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -19,7 +19,7 @@ int main(void)
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
#ifdef CONFIG_SMP
- DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
+ DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
#ifdef CONFIG_LRU_GEN
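The one-line bounds.c change matters when CONFIG_NR_CPUS is an exact power of two: bits_per(n) counts the bits needed to represent the value n itself, while order_base_2(n) counts the bits needed to encode CPU numbers 0..n-1. A userspace reimplementation of both helpers, assuming the usual kernel semantics, makes the difference visible:

#include <stdio.h>

static int fls_u(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

/* bits_per(n): bits needed to represent the value n itself */
static int bits_per(unsigned int n)
{
	return n ? fls_u(n) : 1;
}

/* order_base_2(n): ceil(log2(n)), i.e. bits needed for values 0..n-1 */
static int order_base_2(unsigned int n)
{
	return n > 1 ? fls_u(n - 1) : 0;
}

int main(void)
{
	unsigned int nr_cpus[] = { 1, 5, 64, 512 };

	for (unsigned int i = 0; i < sizeof(nr_cpus) / sizeof(nr_cpus[0]); i++)
		printf("NR_CPUS=%-3u  bits_per=%d  order_base_2=%d\n",
		       nr_cpus[i], bits_per(nr_cpus[i]), order_base_2(nr_cpus[i]));
	return 0;	/* 64 CPUs: bits_per says 7, order_base_2 says 6 */
}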
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 95c7fd093e556..192f67dd1f251 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2970,6 +2970,15 @@ bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
return false;
}
+u64 __weak bpf_arch_uaddress_limit(void)
+{
+#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
+ return TASK_SIZE;
+#else
+ return 0;
+#endif
+}
+
/* Return TRUE if the JIT backend satisfies the following two conditions:
* 1) JIT backend supports atomic_xchg() on pointer-sized words.
* 2) Under the specific arch, the implementation of xchg() is the same
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 87ff414899cf3..719c633e500ab 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18469,8 +18469,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
f = fdget(fd);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
- verbose(env, "fd %d is not pointing to valid bpf_map\n",
- insn[0].imm);
+ verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
return PTR_ERR(map);
}
@@ -19886,6 +19885,36 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
goto next_insn;
}
+ /* Make it impossible to de-reference a userspace address */
+ if (BPF_CLASS(insn->code) == BPF_LDX &&
+ (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
+ struct bpf_insn *patch = &insn_buf[0];
+ u64 uaddress_limit = bpf_arch_uaddress_limit();
+
+ if (!uaddress_limit)
+ goto next_insn;
+
+ *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+ if (insn->off)
+ *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off);
+ *patch++ = BPF_ALU64_IMM(BPF_RSH, BPF_REG_AX, 32);
+ *patch++ = BPF_JMP_IMM(BPF_JLE, BPF_REG_AX, uaddress_limit >> 32, 2);
+ *patch++ = *insn;
+ *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0);
+
+ cnt = patch - insn_buf;
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto next_insn;
+ }
+
/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
if (BPF_CLASS(insn->code) == BPF_LD &&
(BPF_MODE(insn->code) == BPF_ABS ||
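The sequence spliced in above compares only the upper 32 bits of the address against the upper 32 bits of bpf_arch_uaddress_limit(): a user-range address skips the load and zeroes the destination register, while a kernel-range address falls through to the original load. A userspace model of that check; TASK_SIZE_EXAMPLE is an illustrative x86-64-style value, not taken from this patch.

#include <stdio.h>
#include <stdint.h>

#define TASK_SIZE_EXAMPLE 0x00007ffffffff000ULL	/* assumed user/kernel split */

static uint64_t guarded_probe_read(uint64_t addr, const uint64_t *backing)
{
	/* Same comparison the patched instruction sequence performs. */
	if ((addr >> 32) <= (TASK_SIZE_EXAMPLE >> 32))
		return 0;		/* looks like a user address: refuse, return 0 */
	return *backing;		/* "kernel" address: perform the load */
}

int main(void)
{
	uint64_t kernel_value = 0xdeadbeef;

	printf("user addr   -> %#llx\n",
	       (unsigned long long)guarded_probe_read(0x00007f0000001000ULL, &kernel_value));
	printf("kernel addr -> %#llx\n",
	       (unsigned long long)guarded_probe_read(0xffff888000001000ULL, &kernel_value));
	return 0;
}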
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 07ad53b7f1195..63447eb85dab6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3196,6 +3196,7 @@ void __init boot_cpu_hotplug_init(void)
this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
}
+#ifdef CONFIG_CPU_MITIGATIONS
/*
* These are used for a global "mitigations=" cmdline option for toggling
* optional CPU mitigations.
@@ -3206,9 +3207,7 @@ enum cpu_mitigations {
CPU_MITIGATIONS_AUTO_NOSMT,
};
-static enum cpu_mitigations cpu_mitigations __ro_after_init =
- IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
- CPU_MITIGATIONS_OFF;
+static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
static int __init mitigations_parse_cmdline(char *arg)
{
@@ -3224,7 +3223,6 @@ static int __init mitigations_parse_cmdline(char *arg)
return 0;
}
-early_param("mitigations", mitigations_parse_cmdline);
/* mitigations=off */
bool cpu_mitigations_off(void)
@@ -3239,3 +3237,11 @@ bool cpu_mitigations_auto_nosmt(void)
return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
+#else
+static int __init mitigations_parse_cmdline(char *arg)
+{
+ pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
+ return 0;
+}
+#endif
+early_param("mitigations", mitigations_parse_cmdline);
diff --git a/kernel/profile.c b/kernel/profile.c
index 8a77769bc4b4c..2b775cc5c28f9 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -344,49 +344,6 @@ void profile_tick(int type)
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
- return 0;
-}
-
-static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, prof_cpu_mask_proc_show, NULL);
-}
-
-static ssize_t prof_cpu_mask_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *pos)
-{
- cpumask_var_t new_value;
- int err;
-
- if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
- return -ENOMEM;
-
- err = cpumask_parse_user(buffer, count, new_value);
- if (!err) {
- cpumask_copy(prof_cpu_mask, new_value);
- err = count;
- }
- free_cpumask_var(new_value);
- return err;
-}
-
-static const struct proc_ops prof_cpu_mask_proc_ops = {
- .proc_open = prof_cpu_mask_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = single_release,
- .proc_write = prof_cpu_mask_proc_write,
-};
-
-void create_prof_cpu_mask(void)
-{
- /* create /proc/irq/prof_cpu_mask */
- proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_ops);
-}
-
/*
* This function accesses profiling information. The returned data is
* binary: the sampling step and the actual contents of the profile
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 03be0d1330a6b..c62805dbd6088 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -696,15 +696,21 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
*
* XXX could add max_slice to the augmented data to track this.
*/
-static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static s64 entity_lag(u64 avruntime, struct sched_entity *se)
{
- s64 lag, limit;
+ s64 vlag, limit;
+
+ vlag = avruntime - se->vruntime;
+ limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+ return clamp(vlag, -limit, limit);
+}
+
+static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
SCHED_WARN_ON(!se->on_rq);
- lag = avg_vruntime(cfs_rq) - se->vruntime;
- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
- se->vlag = clamp(lag, -limit, limit);
+ se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
}
/*
@@ -3676,11 +3682,10 @@ static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
-static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
+static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
unsigned long weight)
{
unsigned long old_weight = se->load.weight;
- u64 avruntime = avg_vruntime(cfs_rq);
s64 vlag, vslice;
/*
@@ -3761,7 +3766,7 @@ static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
* = V - vl'
*/
if (avruntime != se->vruntime) {
- vlag = (s64)(avruntime - se->vruntime);
+ vlag = entity_lag(avruntime, se);
vlag = div_s64(vlag * old_weight, weight);
se->vruntime = avruntime - vlag;
}
@@ -3787,25 +3792,26 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
bool curr = cfs_rq->curr == se;
+ u64 avruntime;
if (se->on_rq) {
/* commit outstanding execution time */
- if (curr)
- update_curr(cfs_rq);
- else
+ update_curr(cfs_rq);
+ avruntime = avg_vruntime(cfs_rq);
+ if (!curr)
__dequeue_entity(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
}
dequeue_load_avg(cfs_rq, se);
- if (!se->on_rq) {
+ if (se->on_rq) {
+ reweight_eevdf(se, avruntime, weight);
+ } else {
/*
* Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
* we need to scale se->vlag when w_i changes.
*/
se->vlag = div_s64(se->vlag * se->load.weight, weight);
- } else {
- reweight_eevdf(cfs_rq, se, weight);
}
update_load_set(&se->load, weight);
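entity_lag() factors out the clamp that update_entity_lag() already applied: the signed distance from the average vruntime is bounded by a slice-derived limit, and reweight_eevdf() now reuses that clamp instead of the raw difference. A minimal userspace sketch, with a made-up limit standing in for calc_delta_fair(2*se->slice, se):

#include <stdio.h>
#include <stdint.h>

static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int64_t entity_lag(uint64_t avruntime, uint64_t vruntime, int64_t limit)
{
	int64_t vlag = (int64_t)(avruntime - vruntime);	/* V - v_i */

	return clamp_s64(vlag, -limit, limit);
}

int main(void)
{
	uint64_t avruntime = 1000000;
	int64_t limit = 50000;	/* stand-in for calc_delta_fair(2*slice, se) */

	/* entity that ran far past the average: large negative lag, capped */
	printf("lag (ran ahead):  %lld\n",
	       (long long)entity_lag(avruntime, 1200000, limit));
	/* entity slightly short of the average: small positive lag, kept */
	printf("lag (ran behind): %lld\n",
	       (long long)entity_lag(avruntime, 990000, limit));
	return 0;
}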
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 373d42c707bc5..5891e715f00d0 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -46,7 +46,16 @@ int housekeeping_any_cpu(enum hk_type type)
if (cpu < nr_cpu_ids)
return cpu;
- return cpumask_any_and(housekeeping.cpumasks[type], cpu_online_mask);
+ cpu = cpumask_any_and(housekeeping.cpumasks[type], cpu_online_mask);
+ if (likely(cpu < nr_cpu_ids))
+ return cpu;
+ /*
+ * Unless we have another problem this can only happen
+ * at boot time before start_secondary() brings the 1st
+ * housekeeping CPU up.
+ */
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING ||
+ type != HK_TYPE_TIMER);
}
}
return smp_processor_id();
@@ -109,6 +118,7 @@ static void __init housekeeping_setup_type(enum hk_type type,
static int __init housekeeping_setup(char *str, unsigned long flags)
{
cpumask_var_t non_housekeeping_mask, housekeeping_staging;
+ unsigned int first_cpu;
int err = 0;
if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK)) {
@@ -129,7 +139,8 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
cpumask_andnot(housekeeping_staging,
cpu_possible_mask, non_housekeeping_mask);
- if (!cpumask_intersects(cpu_present_mask, housekeeping_staging)) {
+ first_cpu = cpumask_first_and(cpu_present_mask, housekeeping_staging);
+ if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) {
__cpumask_set_cpu(smp_processor_id(), housekeeping_staging);
__cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
if (!housekeeping.flags) {
@@ -138,6 +149,9 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
}
}
+ if (cpumask_empty(non_housekeeping_mask))
+ goto free_housekeeping_staging;
+
if (!housekeeping.flags) {
/* First setup call ("nohz_full=" or "isolcpus=") */
enum hk_type type;
diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c
index f95516cd45bbe..23c125c2e2436 100644
--- a/kernel/vmcore_info.c
+++ b/kernel/vmcore_info.c
@@ -205,11 +205,10 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_head_mask);
#define PAGE_BUDDY_MAPCOUNT_VALUE (~PG_buddy)
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
-#ifdef CONFIG_HUGETLB_PAGE
- VMCOREINFO_NUMBER(PG_hugetlb);
+#define PAGE_HUGETLB_MAPCOUNT_VALUE (~PG_hugetlb)
+ VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
#define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline)
VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
-#endif
#ifdef CONFIG_KALLSYMS
VMCOREINFO_SYMBOL(kallsyms_names);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0066c8f6c1544..d2dbe099286b9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1277,8 +1277,12 @@ static bool kick_pool(struct worker_pool *pool)
!cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
struct work_struct *work = list_first_entry(&pool->worklist,
struct work_struct, entry);
- p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
- get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+ int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
+ cpu_online_mask);
+ if (wake_cpu < nr_cpu_ids) {
+ p->wake_cpu = wake_cpu;
+ get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+ }
}
#endif
wake_up_process(p);
@@ -1594,6 +1598,15 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
if (off_cpu >= 0)
total_cpus--;
+ /* If all CPUs of the wq go offline, use the default values */
+ if (unlikely(!total_cpus)) {
+ for_each_node(node)
+ wq_node_nr_active(wq, node)->max = min_active;
+
+ wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
+ return;
+ }
+
for_each_node(node) {
int node_cpus;
@@ -1606,7 +1619,7 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
min_active, max_active);
}
- wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
+ wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
}
/**
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c63a5fbf1f1c2..291185f54ee4c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -375,7 +375,7 @@ config DEBUG_INFO_SPLIT
Incompatible with older versions of ccache.
config DEBUG_INFO_BTF
- bool "Generate BTF typeinfo"
+ bool "Generate BTF type information"
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
@@ -408,7 +408,8 @@ config PAHOLE_HAS_LANG_EXCLUDE
using DEBUG_INFO_BTF_MODULES.
config DEBUG_INFO_BTF_MODULES
- def_bool y
+ bool "Generate BTF type information for kernel modules"
+ default y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
help
Generate compact split BTF type information for kernel modules.
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 68b45c82c37a6..7bc2220fea805 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
do {
res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
extraction_flags, &off);
- if (res < 0)
+ if (res <= 0)
goto failed;
len = res;
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 68c97387aa54e..cd8f234552851 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -627,10 +627,10 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
- * contexts and I/O.
+ * contexts, I/O, nolockdep.
*/
alloc_flags &= ~GFP_ZONEMASK;
- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
alloc_flags |= __GFP_NOWARN;
page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
if (page)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 31d00eee028f1..ce7be5c244429 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1624,7 +1624,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
{
lockdep_assert_held(&hugetlb_lock);
- folio_clear_hugetlb(folio);
+ __folio_clear_hugetlb(folio);
}
/*
@@ -1711,7 +1711,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
h->surplus_huge_pages_node[nid]++;
}
- folio_set_hugetlb(folio);
+ __folio_set_hugetlb(folio);
folio_change_private(folio, NULL);
/*
* We have to set hugetlb_vmemmap_optimized again as above
@@ -1781,7 +1781,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
* If vmemmap pages were allocated above, then we need to clear the
* hugetlb destructor under the hugetlb lock.
*/
- if (clear_dtor) {
+ if (folio_test_hugetlb(folio)) {
spin_lock_irq(&hugetlb_lock);
__clear_hugetlb_destructor(h, folio);
spin_unlock_irq(&hugetlb_lock);
@@ -2049,7 +2049,7 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
- folio_set_hugetlb(folio);
+ __folio_set_hugetlb(folio);
INIT_LIST_HEAD(&folio->lru);
hugetlb_set_folio_subpool(folio, NULL);
set_hugetlb_cgroup(folio, NULL);
@@ -2160,22 +2160,6 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
}
/*
- * PageHuge() only returns true for hugetlbfs pages, but not for normal or
- * transparent huge pages. See the PageTransHuge() documentation for more
- * details.
- */
-int PageHuge(const struct page *page)
-{
- const struct folio *folio;
-
- if (!PageCompound(page))
- return 0;
- folio = page_folio(page);
- return folio_test_hugetlb(folio);
-}
-EXPORT_SYMBOL_GPL(PageHuge);
-
-/*
* Find and lock address space (mapping) in write mode.
*
* Upon entry, the page is locked which means that page_mapping() is
@@ -3268,9 +3252,12 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
rsv_adjust = hugepage_subpool_put_pages(spool, 1);
hugetlb_acct_memory(h, -rsv_adjust);
- if (deferred_reserve)
+ if (deferred_reserve) {
+ spin_lock_irq(&hugetlb_lock);
hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
pages_per_huge_page(h), folio);
+ spin_unlock_irq(&hugetlb_lock);
+ }
}
if (!memcg_charge_ret)
@@ -6274,6 +6261,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
VM_UFFD_MISSING);
}
+ if (!(vma->vm_flags & VM_MAYSHARE)) {
+ ret = vmf_anon_prepare(vmf);
+ if (unlikely(ret))
+ goto out;
+ }
+
folio = alloc_hugetlb_folio(vma, haddr, 0);
if (IS_ERR(folio)) {
/*
@@ -6310,15 +6303,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
*/
restore_reserve_on_error(h, vma, haddr, folio);
folio_put(folio);
+ ret = VM_FAULT_SIGBUS;
goto out;
}
new_pagecache_folio = true;
} else {
folio_lock(folio);
-
- ret = vmf_anon_prepare(vmf);
- if (unlikely(ret))
- goto backout_unlocked;
anon_rmap = 1;
}
} else {
diff --git a/mm/zswap.c b/mm/zswap.c
index caed028945b04..6f8850c44b616 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1331,15 +1331,22 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
if (!gfp_has_io_fs(sc->gfp_mask))
return 0;
-#ifdef CONFIG_MEMCG_KMEM
- mem_cgroup_flush_stats(memcg);
- nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
- nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
-#else
- /* use pool stats instead of memcg stats */
- nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
- nr_stored = atomic_read(&zswap_nr_stored);
-#endif
+ /*
+ * For memcg, use the cgroup-wide ZSWAP stats since we don't
+ * have them per-node and thus per-lruvec. Careful if memcg is
+ * runtime-disabled: we can get sc->memcg == NULL, which is ok
+ * for the lruvec, but not for memcg_page_state().
+ *
+ * Without memcg, use the zswap pool-wide metrics.
+ */
+ if (!mem_cgroup_disabled()) {
+ mem_cgroup_flush_stats(memcg);
+ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+ } else {
+ nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
+ nr_stored = atomic_read(&zswap_nr_stored);
+ }
if (!nr_stored)
return 0;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f001582345052..9404dd551dfd2 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -478,6 +478,8 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
if (unlikely(!vhdr))
goto out;
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
+
type = vhdr->h_vlan_encapsulated_proto;
ptype = gro_find_receive_by_type(type);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7431f89e897b9..d7c35f55bd69f 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -266,7 +266,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
if (skb->dev == p->dev && ether_addr_equal(src, addr))
return;
- skb = skb_copy(skb, GFP_ATOMIC);
+ skb = pskb_copy(skb, GFP_ATOMIC);
if (!skb) {
DEV_STATS_INC(dev, tx_dropped);
return;
diff --git a/net/core/filter.c b/net/core/filter.c
index 29165744c5050..2510464692af0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4362,10 +4362,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
+ u32 flags = ri->flags;
struct bpf_map *map;
int err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->flags = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (unlikely(!xdpf)) {
@@ -4377,11 +4379,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- map = READ_ONCE(ri->map);
- if (unlikely(map)) {
+ if (unlikely(flags & BPF_F_BROADCAST)) {
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+ * down by bpf_clear_redirect_map()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+ break;
+ }
+
WRITE_ONCE(ri->map, NULL);
err = dev_map_enqueue_multi(xdpf, dev, map,
- ri->flags & BPF_F_EXCLUDE_INGRESS);
+ flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_enqueue(fwd, xdpf, dev);
}
@@ -4444,9 +4455,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog,
- void *fwd,
- enum bpf_map_type map_type, u32 map_id)
+ struct bpf_prog *xdp_prog, void *fwd,
+ enum bpf_map_type map_type, u32 map_id,
+ u32 flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map;
@@ -4456,11 +4467,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- map = READ_ONCE(ri->map);
- if (unlikely(map)) {
+ if (unlikely(flags & BPF_F_BROADCAST)) {
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+ * down by bpf_clear_redirect_map()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+ break;
+ }
+
WRITE_ONCE(ri->map, NULL);
err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
- ri->flags & BPF_F_EXCLUDE_INGRESS);
+ flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_generic_redirect(fwd, skb, xdp_prog);
}
@@ -4497,9 +4517,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
+ u32 flags = ri->flags;
int err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->flags = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
@@ -4519,7 +4541,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
return 0;
}
- return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
+ return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
err:
_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
return err;
diff --git a/net/core/gro.c b/net/core/gro.c
index 2459ab697f7fd..99a45a5211c99 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -372,6 +372,7 @@ static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
const skb_frag_t *frag0;
unsigned int headlen;
+ NAPI_GRO_CB(skb)->network_offset = 0;
NAPI_GRO_CB(skb)->data_offset = 0;
headlen = skb_headlen(skb);
NAPI_GRO_CB(skb)->frag0 = skb->data;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5f382e94b4d1d..28cd640a6ea97 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2076,11 +2076,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
- int headerlen = skb_headroom(skb);
- unsigned int size = skb_end_offset(skb) + skb->data_len;
- struct sk_buff *n = __alloc_skb(size, gfp_mask,
- skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ struct sk_buff *n;
+ unsigned int size;
+ int headerlen;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+ return NULL;
+ headerlen = skb_headroom(skb);
+ size = skb_end_offset(skb) + skb->data_len;
+ n = __alloc_skb(size, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -2408,12 +2414,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
/*
* Allocate the copy buffer
*/
- struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask, skb_alloc_rx_flag(skb),
- NUMA_NO_NODE);
- int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
+ struct sk_buff *n;
+ int oldheadroom;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+ return NULL;
+ oldheadroom = skb_headroom(skb);
+ n = __alloc_skb(newheadroom + skb->len + newtailroom,
+ gfp_mask, skb_alloc_rx_flag(skb),
+ NUMA_NO_NODE);
if (!n)
return NULL;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 4d75ef9d24bfa..fd20aae30be23 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1226,11 +1226,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
rcu_read_lock();
psock = sk_psock(sk);
- if (psock) {
- read_lock_bh(&sk->sk_callback_lock);
+ if (psock)
sk_psock_data_ready(sk, psock);
- read_unlock_bh(&sk->sk_callback_lock);
- }
rcu_read_unlock();
}
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 486a8d4f53b17..a7bad18bc8b58 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1573,6 +1573,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
/* The above will be needed by the transport layer if there is one
* immediately following this IP hdr.
*/
+ NAPI_GRO_CB(skb)->inner_network_offset = off;
/* Note : No need to call skb_gro_postpull_rcsum() here,
* as we already checked checksum over ipv4 header was 0
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b455bd05a7d5e..9500031a1f55b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1473,7 +1473,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
* by icmp_hdr(skb)->type.
*/
if (sk->sk_type == SOCK_RAW &&
- !inet_test_bit(HDRINCL, sk))
+ !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
icmp_type = fl4->fl4_icmp_type;
else
icmp_type = icmp_hdr(skb)->type;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dcb11f22cbf2b..4cb43401e0e06 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -612,6 +612,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
(hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0, sk->sk_uid);
+ fl4.fl4_icmp_type = 0;
+ fl4.fl4_icmp_code = 0;
+
if (!hdrincl) {
rfv.msg = msg;
rfv.hlen = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fe55ff5d379b4..d0943c5a24e15 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -543,7 +543,8 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- const struct iphdr *iph = ip_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
struct net *net = dev_net(skb->dev);
int iif, sdif;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 3498dd1d0694d..8721fe5beca2b 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -471,6 +471,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *p;
unsigned int ulen;
int ret = 0;
+ int flush;
/* requires non zero csum, for symmetry with GSO */
if (!uh->check) {
@@ -504,13 +505,22 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
return p;
}
+ flush = NAPI_GRO_CB(p)->flush;
+
+ if (NAPI_GRO_CB(p)->flush_id != 1 ||
+ NAPI_GRO_CB(p)->count != 1 ||
+ !NAPI_GRO_CB(p)->is_atomic)
+ flush |= NAPI_GRO_CB(p)->flush_id;
+ else
+ NAPI_GRO_CB(p)->is_atomic = false;
+
/* Terminate the flow on len mismatch or if it grow "too much".
* Under small packet flood GRO count could elsewhere grow a lot
* leading to excessive truesize values.
* On len mismatch merge the first packet shorter than gso_size,
* otherwise complete the GRO packet.
*/
- if (ulen > ntohs(uh2->len)) {
+ if (ulen > ntohs(uh2->len) || flush) {
pp = p;
} else {
if (NAPI_GRO_CB(skb)->is_flist) {
@@ -718,7 +728,8 @@ EXPORT_SYMBOL(udp_gro_complete);
INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
- const struct iphdr *iph = ip_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
/* do fraglist only if there is no outer UDP encap (or we already processed it) */
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index b41e35af69ea2..c8b909a9904f3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -237,6 +237,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
goto out;
skb_set_network_header(skb, off);
+ NAPI_GRO_CB(skb)->inner_network_offset = off;
flush += ntohs(iph->payload_len) != skb->len - hlen;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 674eadfae569a..c81a07ac0463e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -285,7 +285,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
struct net *net = dev_net(skb->dev);
int iif, sdif;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index bbd347de00b45..b41152dd42469 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -164,7 +164,8 @@ flush:
INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
{
- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + offset);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
/* do fraglist only if there is no outer UDP encap (or we already processed it) */
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 39e487ccc4688..8ba00ad433c21 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -127,6 +127,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
/* checksums verified by L2TP */
skb->ip_summed = CHECKSUM_NONE;
+ /* drop outer flow-hash */
+ skb_clear_hash(skb);
+
skb_dst_drop(skb);
nf_reset_ct(skb);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index aff17597e6a71..bb8f96f2b86fe 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3731,6 +3731,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
mptcp_subflow_early_fallback(msk, subflow);
}
+
+ WRITE_ONCE(msk->write_seq, subflow->idsn);
+ WRITE_ONCE(msk->snd_nxt, subflow->idsn);
if (likely(!__mptcp_check_fallback(msk)))
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index f4a38bd6a7e04..bfb7758063f31 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -77,13 +77,15 @@ EXPORT_SYMBOL_GPL(nsh_pop);
static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
+ unsigned int outer_hlen, mac_len, nsh_len;
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
- unsigned int nsh_len, mac_len;
- __be16 proto;
+ __be16 outer_proto, proto;
skb_reset_network_header(skb);
+ outer_proto = skb->protocol;
+ outer_hlen = skb_mac_header_len(skb);
mac_len = skb->mac_len;
if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -113,10 +115,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
}
for (skb = segs; skb; skb = skb->next) {
- skb->protocol = htons(ETH_P_NSH);
- __skb_push(skb, nsh_len);
- skb->mac_header = mac_offset;
- skb->network_header = skb->mac_header + mac_len;
+ skb->protocol = outer_proto;
+ __skb_push(skb, nsh_len + outer_hlen);
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, outer_hlen);
skb->mac_len = mac_len;
}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 0af4642aeec4b..1539d315afe74 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -119,18 +119,13 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
switch (srx->transport.family) {
case AF_INET:
if (peer->srx.transport.sin.sin_port !=
- srx->transport.sin.sin_port ||
- peer->srx.transport.sin.sin_addr.s_addr !=
- srx->transport.sin.sin_addr.s_addr)
+ srx->transport.sin.sin_port)
goto not_found;
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
if (peer->srx.transport.sin6.sin6_port !=
- srx->transport.sin6.sin6_port ||
- memcmp(&peer->srx.transport.sin6.sin6_addr,
- &srx->transport.sin6.sin6_addr,
- sizeof(struct in6_addr)) != 0)
+ srx->transport.sin6.sin6_port)
goto not_found;
break;
#endif
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index f2701068ed9e4..6716c021a532e 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -19,7 +19,7 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
*/
static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 0, gfp);
+ return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 1, gfp);
}
static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f1a68270862db..48a1475e6b063 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -155,7 +155,7 @@ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t rem
switch (call->conn->security_level) {
default:
space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- return rxrpc_alloc_data_txbuf(call, space, 0, gfp);
+ return rxrpc_alloc_data_txbuf(call, space, 1, gfp);
case RXRPC_SECURITY_AUTH:
shdr = sizeof(struct rxkad_level1_hdr);
break;
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index e0679658d9de0..c3913d8a50d34 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -21,20 +21,20 @@ struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_
{
struct rxrpc_wire_header *whdr;
struct rxrpc_txbuf *txb;
- size_t total, hoff = 0;
+ size_t total, hoff;
void *buf;
txb = kmalloc(sizeof(*txb), gfp);
if (!txb)
return NULL;
- if (data_align)
- hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
+ hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
total = hoff + sizeof(*whdr) + data_size;
+ data_align = umax(data_align, L1_CACHE_BYTES);
mutex_lock(&call->conn->tx_data_alloc_lock);
- buf = __page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
- ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+ buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+ data_align);
mutex_unlock(&call->conn->tx_data_alloc_lock);
if (!buf) {
kfree(txb);
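With callers now passing a data_align of 1 instead of 0, the header offset is always computed so that the payload behind the wire header starts on a data_align boundary, and the allocation itself is aligned to at least L1_CACHE_BYTES. The offset arithmetic in userspace, assuming the 28-byte size of struct rxrpc_wire_header:

#include <stdio.h>
#include <stddef.h>

#define WIRE_HDR_SIZE 28u	/* assumed sizeof(struct rxrpc_wire_header) */

static size_t round_up_to(size_t x, size_t align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	size_t data_aligns[] = { 1, 8, 16 };

	for (size_t i = 0; i < sizeof(data_aligns) / sizeof(data_aligns[0]); i++) {
		size_t align = data_aligns[i];
		/* hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr) */
		size_t hoff = round_up_to(WIRE_HDR_SIZE, align) - WIRE_HDR_SIZE;

		printf("align=%2zu  hoff=%zu  payload offset=%zu\n",
		       align, hoff, hoff + WIRE_HDR_SIZE);
	}
	return 0;	/* payload offset is always a multiple of align */
}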
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bb9b747d58a1a..ce18716491c8f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2664,6 +2664,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
.xprtsec = {
.policy = RPC_XPRTSEC_NONE,
},
+ .stats = upper_clnt->cl_stats,
};
unsigned int pflags = current->flags;
struct rpc_clnt *lower_clnt;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 5c9fd4791c4ba..76284fc538ebd 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- *buf = NULL;
if (skb_has_frag_list(frag) && __skb_linearize(frag))
goto err;
+ *buf = NULL;
frag = skb_unshare(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
@@ -156,6 +156,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (!head)
goto err;
+ /* Either ownership of the input skb is transferred to headskb
+ * or the input skb is freed; clear the reference to avoid a
+ * bad access on the error path.
+ */
+ *buf = NULL;
if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
kfree_skb_partial(frag, headstolen);
} else {
@@ -179,7 +184,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
*headbuf = NULL;
return 1;
}
- *buf = NULL;
return 0;
err:
kfree_skb(*buf);
diff --git a/rust/Makefile b/rust/Makefile
index 846e6ab9d5a9b..86a125c4243cb 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -175,7 +175,6 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) --test $(rust_flags) \
- @$(objtree)/include/generated/rustc_cfg \
-L$(objtree)/$(obj) --extern alloc --extern kernel \
--extern build_error --extern macros \
--extern bindings --extern uapi \
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 424257284d167..09004b56fb65c 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -1292,8 +1292,15 @@ impl_zeroable! {
i8, i16, i32, i64, i128, isize,
f32, f64,
- // SAFETY: These are ZSTs, there is nothing to zero.
- {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, Infallible, (),
+ // Note: do not add uninhabited types (such as `!` or `core::convert::Infallible`) to this list;
+ // creating an instance of an uninhabited type is immediate undefined behavior. For more on
+ // uninhabited/empty types, consult The Rustonomicon:
+ // <https://doc.rust-lang.org/stable/nomicon/exotic-sizes.html#empty-types>. The Rust Reference
+ // also has information on undefined behavior:
+ // <https://doc.rust-lang.org/stable/reference/behavior-considered-undefined.html>.
+ //
+ // SAFETY: These are inhabited ZSTs; there is nothing to zero and a valid value exists.
+ {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, (),
// SAFETY: Type is allowed to take any value, including all zeros.
{<T>} MaybeUninit<T>,
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index be68d5e567b1a..6858e2f8a3ed9 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -65,7 +65,7 @@ const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
/// The top level entrypoint to implementing a kernel module.
///
/// For any teardown or cleanup operations, your type may implement [`Drop`].
-pub trait Module: Sized + Sync {
+pub trait Module: Sized + Sync + Send {
/// Called at module initialization time.
///
/// Use this method to perform whatever setup or registration your module
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index 96e09c6e8530b..265d0e1c13710 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -640,6 +640,10 @@ pub struct Registration {
drivers: Pin<&'static mut [DriverVTable]>,
}
+// SAFETY: The only action allowed in a `Registration` instance is dropping it, which is safe to do
+// from any thread because `phy_drivers_unregister` can be called from any thread context.
+unsafe impl Send for Registration {}
+
impl Registration {
/// Registers a PHY driver.
pub fn register(
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index f489f31573832..520eae5fd7928 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -35,18 +35,6 @@ use proc_macro::TokenStream;
/// author: "Rust for Linux Contributors",
/// description: "My very own kernel module!",
/// license: "GPL",
-/// params: {
-/// my_i32: i32 {
-/// default: 42,
-/// permissions: 0o000,
-/// description: "Example of i32",
-/// },
-/// writeable_i32: i32 {
-/// default: 42,
-/// permissions: 0o644,
-/// description: "Example of i32",
-/// },
-/// },
/// }
///
/// struct MyModule;
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 27979e582e4b9..acd0393b50957 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -199,17 +199,6 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
/// Used by the printing macros, e.g. [`info!`].
const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
- /// The \"Rust loadable module\" mark.
- //
- // This may be best done another way later on, e.g. as a new modinfo
- // key or a new section. For the moment, keep it simple.
- #[cfg(MODULE)]
- #[doc(hidden)]
- #[used]
- static __IS_RUST_MODULE: () = ();
-
- static mut __MOD: Option<{type_}> = None;
-
// SAFETY: `__this_module` is constructed by the kernel at load time and will not be
// freed until the module is unloaded.
#[cfg(MODULE)]
@@ -221,81 +210,132 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
kernel::ThisModule::from_ptr(core::ptr::null_mut())
}};
- // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
- /// # Safety
- ///
- /// This function must not be called after module initialization, because it may be
- /// freed after that completes.
- #[cfg(MODULE)]
- #[doc(hidden)]
- #[no_mangle]
- #[link_section = \".init.text\"]
- pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
- __init()
- }}
-
- #[cfg(MODULE)]
- #[doc(hidden)]
- #[no_mangle]
- pub extern \"C\" fn cleanup_module() {{
- __exit()
- }}
+ // Double nested modules, since then nobody can access the public items inside.
+ mod __module_init {{
+ mod __module_init {{
+ use super::super::{type_};
+
+ /// The \"Rust loadable module\" mark.
+ //
+ // This may be best done another way later on, e.g. as a new modinfo
+ // key or a new section. For the moment, keep it simple.
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[used]
+ static __IS_RUST_MODULE: () = ();
+
+ static mut __MOD: Option<{type_}> = None;
+
+ // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
+ /// # Safety
+ ///
+ /// This function must not be called after module initialization, because it may be
+ /// freed after that completes.
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ #[link_section = \".init.text\"]
+ pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
+ // SAFETY: This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // unique name.
+ unsafe {{ __init() }}
+ }}
- // Built-in modules are initialized through an initcall pointer
- // and the identifiers need to be unique.
- #[cfg(not(MODULE))]
- #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
- #[doc(hidden)]
- #[link_section = \"{initcall_section}\"]
- #[used]
- pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn cleanup_module() {{
+ // SAFETY:
+ // - This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // unique name,
+ // - furthermore it is only called after `init_module` has returned `0`
+ // (which delegates to `__init`).
+ unsafe {{ __exit() }}
+ }}
- #[cfg(not(MODULE))]
- #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
- core::arch::global_asm!(
- r#\".section \"{initcall_section}\", \"a\"
- __{name}_initcall:
- .long __{name}_init - .
- .previous
- \"#
- );
+ // Built-in modules are initialized through an initcall pointer
+ // and the identifiers need to be unique.
+ #[cfg(not(MODULE))]
+ #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
+ #[doc(hidden)]
+ #[link_section = \"{initcall_section}\"]
+ #[used]
+ pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
+
+ #[cfg(not(MODULE))]
+ #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
+ core::arch::global_asm!(
+ r#\".section \"{initcall_section}\", \"a\"
+ __{name}_initcall:
+ .long __{name}_init - .
+ .previous
+ \"#
+ );
+
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
+ // SAFETY: This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // placement above in the initcall section.
+ unsafe {{ __init() }}
+ }}
- #[cfg(not(MODULE))]
- #[doc(hidden)]
- #[no_mangle]
- pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
- __init()
- }}
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_exit() {{
+ // SAFETY:
+ // - This function is inaccessible to the outside due to the double
+ // module wrapping it. It is called exactly once by the C side via its
+ // unique name,
+ // - furthermore it is only called after `__{name}_init` has returned `0`
+ // (which delegates to `__init`).
+ unsafe {{ __exit() }}
+ }}
- #[cfg(not(MODULE))]
- #[doc(hidden)]
- #[no_mangle]
- pub extern \"C\" fn __{name}_exit() {{
- __exit()
- }}
+ /// # Safety
+ ///
+ /// This function must only be called once.
+ unsafe fn __init() -> core::ffi::c_int {{
+ match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{
+ Ok(m) => {{
+ // SAFETY: No data race, since `__MOD` can only be accessed by this
+ // module and there only `__init` and `__exit` access it. These
+ // functions are only called once and `__exit` cannot be called
+ // before or during `__init`.
+ unsafe {{
+ __MOD = Some(m);
+ }}
+ return 0;
+ }}
+ Err(e) => {{
+ return e.to_errno();
+ }}
+ }}
+ }}
- fn __init() -> core::ffi::c_int {{
- match <{type_} as kernel::Module>::init(&THIS_MODULE) {{
- Ok(m) => {{
+ /// # Safety
+ ///
+ /// This function must
+ /// - only be called once,
+ /// - be called after `__init` has been called and returned `0`.
+ unsafe fn __exit() {{
+ // SAFETY: No data race, since `__MOD` can only be accessed by this module
+ // and there only `__init` and `__exit` access it. These functions are only
+ // called once and `__init` was already called.
unsafe {{
- __MOD = Some(m);
+ // Invokes `drop()` on `__MOD`, which should be used for cleanup.
+ __MOD = None;
}}
- return 0;
- }}
- Err(e) => {{
- return e.to_errno();
}}
- }}
- }}
- fn __exit() {{
- unsafe {{
- // Invokes `drop()` on `__MOD`, which should be used for cleanup.
- __MOD = None;
+ {modinfo}
}}
}}
-
- {modinfo}
",
type_ = info.type_,
name = info.name,
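
The generated code above hides its entry points behind two nested `__module_init` modules: the items stay `pub` and `#[no_mangle]` so the C side can still reach them by symbol name, but no Rust path outside the outer module can name them, because the inner module itself is private. A standalone sketch of that visibility rule (module and function names here are illustrative only):

    mod outer {
        mod inner {
            // Reachable by the linker via its unmangled symbol name, but not as a
            // Rust path from outside `outer`, because `inner` is a private module.
            #[no_mangle]
            pub extern "C" fn my_entry_point() -> i32 {
                0
            }
        }
    }

    fn main() {
        // Either call below fails to compile, which is the point of the nesting:
        // let _ = outer::my_entry_point();        // no such item at this level
        // let _ = outer::inner::my_entry_point(); // error: module `inner` is private
    }
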
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index baf86c0880b6d..533a7799fdfe6 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -273,7 +273,7 @@ rust_common_cmd = \
-Zallow-features=$(rust_allowed_features) \
-Zcrate-attr=no_std \
-Zcrate-attr='feature($(rust_allowed_features))' \
- --extern alloc --extern kernel \
+ -Zunstable-options --extern force:alloc --extern kernel \
--crate-type rlib -L $(objtree)/rust/ \
--crate-name $(basename $(notdir $@)) \
--sysroot=/dev/null \
diff --git a/tools/perf/arch/riscv/util/header.c b/tools/perf/arch/riscv/util/header.c
index 4a41856938a88..1b29030021eeb 100644
--- a/tools/perf/arch/riscv/util/header.c
+++ b/tools/perf/arch/riscv/util/header.c
@@ -41,7 +41,7 @@ static char *_get_cpuid(void)
char *mimpid = NULL;
char *cpuid = NULL;
int read;
- unsigned long line_sz;
+ size_t line_sz;
FILE *cpuinfo;
cpuinfo = fopen(CPUINFO, "r");
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index eb2b78552ca29..0b3cef2c5159b 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -205,6 +205,9 @@ __weak noinline struct file *bpf_testmod_return_ptr(int arg)
case 5: return (void *)~(1ull << 30); /* trigger extable */
case 6: return &f; /* valid addr */
case 7: return (void *)((long)&f | 1); /* kernel tricks */
+#ifdef CONFIG_X86_64
+ case 8: return (void *)VSYSCALL_ADDR; /* vsyscall page address */
+#endif
default: return NULL;
}
}
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index ba3ddeda24bf5..d98702b6955df 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -56,7 +56,6 @@
#include <asm/types.h>
#include <ctype.h>
#include <errno.h>
-#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
@@ -1159,7 +1158,7 @@ void __run_test(struct __fixture_metadata *f,
struct __test_metadata *t)
{
struct __test_xfail *xfail;
- char test_name[LINE_MAX];
+ char *test_name;
const char *diagnostic;
/* reset test struct */
@@ -1167,8 +1166,12 @@ void __run_test(struct __fixture_metadata *f,
t->trigger = 0;
memset(t->results->reason, 0, sizeof(t->results->reason));
- snprintf(test_name, sizeof(test_name), "%s%s%s.%s",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ if (asprintf(&test_name, "%s%s%s.%s", f->name,
+ variant->name[0] ? "." : "", variant->name, t->name) == -1) {
+ ksft_print_msg("ERROR ALLOCATING MEMORY\n");
+ t->exit_code = KSFT_FAIL;
+ _exit(t->exit_code);
+ }
ksft_print_msg(" RUN %s ...\n", test_name);
@@ -1206,6 +1209,7 @@ void __run_test(struct __fixture_metadata *f,
ksft_test_result_code(t->exit_code, test_name,
diagnostic ? "%s" : NULL, diagnostic);
+ free(test_name);
}
static int test_harness_run(int argc, char **argv)
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
index eef816b80993f..ca917c71ff602 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
@@ -84,6 +84,18 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
return v;
}
+static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
+{
+ struct vm_gic v;
+
+ v.gic_dev_type = gic_dev_type;
+ v.vm = vm_create_barebones();
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
+
+ return v;
+}
+
+
static void vm_gic_destroy(struct vm_gic *v)
{
close(v->gic_fd);
@@ -357,6 +369,40 @@ static void test_vcpus_then_vgic(uint32_t gic_dev_type)
vm_gic_destroy(&v);
}
+#define KVM_VGIC_V2_ATTR(offset, cpu) \
+ (FIELD_PREP(KVM_DEV_ARM_VGIC_OFFSET_MASK, offset) | \
+ FIELD_PREP(KVM_DEV_ARM_VGIC_CPUID_MASK, cpu))
+
+#define GIC_CPU_CTRL 0x00
+
+static void test_v2_uaccess_cpuif_no_vcpus(void)
+{
+ struct vm_gic v;
+ u64 val = 0;
+ int ret;
+
+ v = vm_gic_create_barebones(KVM_DEV_TYPE_ARM_VGIC_V2);
+ subtest_dist_rdist(&v);
+
+ ret = __kvm_has_device_attr(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0));
+ TEST_ASSERT(ret && errno == EINVAL,
+ "accessed non-existent CPU interface, want errno: %i",
+ EINVAL);
+ ret = __kvm_device_attr_get(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "accessed non-existent CPU interface, want errno: %i",
+ EINVAL);
+ ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "accessed non-existent CPU interface, want errno: %i",
+ EINVAL);
+
+ vm_gic_destroy(&v);
+}
+
static void test_v3_new_redist_regions(void)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
@@ -675,6 +721,9 @@ void run_tests(uint32_t gic_dev_type)
test_vcpus_then_vgic(gic_dev_type);
test_vgic_then_vcpus(gic_dev_type);
+ if (VGIC_DEV_IS_V2(gic_dev_type))
+ test_v2_uaccess_cpuif_no_vcpus();
+
if (VGIC_DEV_IS_V3(gic_dev_type)) {
test_v3_new_redist_regions();
test_v3_typer_accesses();
diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
index 200bedcdc32e9..1e01d3ddc11c5 100644
--- a/tools/testing/selftests/mm/mdwe_test.c
+++ b/tools/testing/selftests/mm/mdwe_test.c
@@ -7,6 +7,7 @@
#include <linux/mman.h>
#include <linux/prctl.h>
+#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index 374a308174d2b..48dc151f8fca8 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -54,7 +54,6 @@ int test_nr;
u64 shadow_pkey_reg;
int dprint_in_signal;
char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
-char buf[256];
void cat_into_file(char *str, char *file)
{
@@ -1745,42 +1744,6 @@ void pkey_setup_shadow(void)
shadow_pkey_reg = __read_pkey_reg();
}
-pid_t parent_pid;
-
-void restore_settings_atexit(void)
-{
- if (parent_pid == getpid())
- cat_into_file(buf, "/proc/sys/vm/nr_hugepages");
-}
-
-void save_settings(void)
-{
- int fd;
- int err;
-
- if (geteuid())
- return;
-
- fd = open("/proc/sys/vm/nr_hugepages", O_RDONLY);
- if (fd < 0) {
- fprintf(stderr, "error opening\n");
- perror("error: ");
- exit(__LINE__);
- }
-
- /* -1 to guarantee leaving the trailing \0 */
- err = read(fd, buf, sizeof(buf)-1);
- if (err < 0) {
- fprintf(stderr, "error reading\n");
- perror("error: ");
- exit(__LINE__);
- }
-
- parent_pid = getpid();
- atexit(restore_settings_atexit);
- close(fd);
-}
-
int main(void)
{
int nr_iterations = 22;
@@ -1788,7 +1751,6 @@ int main(void)
srand((unsigned int)time(NULL));
- save_settings();
setup_handlers();
printf("has pkeys: %d\n", pkeys_supported);
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index c2c542fe7b17b..4bdb3a0c7a606 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -385,6 +385,7 @@ CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0
CATEGORY="ksm" run_test ./ksm_functional_tests
# protection_keys tests
+nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ -x ./protection_keys_32 ]
then
CATEGORY="pkey" run_test ./protection_keys_32
@@ -394,6 +395,7 @@ if [ -x ./protection_keys_64 ]
then
CATEGORY="pkey" run_test ./protection_keys_64
fi
+echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
if [ -x ./soft-dirty ]
then
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 6c988bd2f3356..d3c7f5fb3e7b7 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -300,7 +300,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
char **addr)
{
size_t i;
- int dummy;
+ int __attribute__((unused)) dummy = 0;
srand(time(NULL));
diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c
index c537d52fafc58..a40541bb7c7de 100644
--- a/tools/testing/selftests/riscv/hwprobe/cbo.c
+++ b/tools/testing/selftests/riscv/hwprobe/cbo.c
@@ -19,7 +19,7 @@
#include "hwprobe.h"
#include "../../kselftest.h"
-#define MK_CBO(fn) cpu_to_le32((fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
+#define MK_CBO(fn) le32_bswap((uint32_t)(fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
static char mem[4096] __aligned(4096) = { [0 ... 4095] = 0xa5 };
diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.h b/tools/testing/selftests/riscv/hwprobe/hwprobe.h
index e3fccb390c4dc..f3de970c32227 100644
--- a/tools/testing/selftests/riscv/hwprobe/hwprobe.h
+++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.h
@@ -4,6 +4,16 @@
#include <stddef.h>
#include <asm/hwprobe.h>
+#if __BYTE_ORDER == __BIG_ENDIAN
+# define le32_bswap(_x) \
+ ((((_x) & 0x000000ffU) << 24) | \
+ (((_x) & 0x0000ff00U) << 8) | \
+ (((_x) & 0x00ff0000U) >> 8) | \
+ (((_x) & 0xff000000U) >> 24))
+#else
+# define le32_bswap(_x) (_x)
+#endif
+
/*
* Rather than relying on having a new enough libc to define this, just do it
* ourselves. This way we don't need to be coupled to a new-enough libc to
diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
index b5d592d4099e8..d975a67673299 100644
--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
@@ -158,6 +158,20 @@ static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
/* In preparation for sigreturn. */
SYSCALL_DISPATCH_OFF(glob_sel);
+
+ /*
+ * The tests for argument handling assume that `syscall(x) == x`. This
+ * is a NOP on x86 because the syscall number is passed in %rax, which
+ * happens to also be the function ABI return register. Other
+ * architectures may need to swizzle the arguments around.
+ */
+#if defined(__riscv)
+/* REG_A7 is not defined in libc headers */
+# define REG_A7 (REG_A0 + 7)
+
+ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A0] =
+ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A7];
+#endif
}
TEST(dispatch_and_return)