From 5e8b6a40be80806f3baaef417c7be376e6b23261 Mon Sep 17 00:00:00 2001 From: Mingzheng Xing Date: Fri, 17 Oct 2025 11:58:28 +0800 Subject: [PATCH] riscv: upgrade to 6.6.0-112.0.0 RISC-V kernel upgrade to 6.6.0-112.0.0 Sync patches from the rvck-olk repository, including: - ACPI, IOMMU drivers; - Key RISC-V extensions support; - Add support for K1, SG2044, and DP1000. Signed-off-by: Mingzheng Xing --- 0001-riscv-kernel.patch | 56897 +++++++++++++++++++++++++++++++++++++- kernel.spec | 9 +- 2 files changed, 55522 insertions(+), 1384 deletions(-) diff --git a/0001-riscv-kernel.patch b/0001-riscv-kernel.patch index b284eb30..9ee1fe20 100644 --- a/0001-riscv-kernel.patch +++ b/0001-riscv-kernel.patch @@ -1,25 +1,42 @@ -From aeaeeb3c790bff7d8d8d9a31d62685fbef521318 Mon Sep 17 00:00:00 2001 +From ada38d466e22fe6ebdb687bd441c6ad3347b03d5 Mon Sep 17 00:00:00 2001 From: Mingzheng Xing -Date: Mon, 17 Mar 2025 12:34:24 +0800 +Date: Fri, 17 Oct 2025 10:24:50 +0800 Subject: [PATCH] riscv kernel Signed-off-by: Mingzheng Xing --- + .github/workflows/main.yml | 20 + + Documentation/arch/index.rst | 2 +- + Documentation/{ => arch}/riscv/acpi.rst | 0 + .../{ => arch}/riscv/boot-image-header.rst | 0 + Documentation/{ => arch}/riscv/boot.rst | 0 + Documentation/{ => arch}/riscv/features.rst | 0 + Documentation/arch/riscv/hwprobe.rst | 271 + + Documentation/{ => arch}/riscv/index.rst | 0 + .../{ => arch}/riscv/patch-acceptance.rst | 0 + Documentation/{ => arch}/riscv/uabi.rst | 0 + Documentation/{ => arch}/riscv/vector.rst | 0 + Documentation/{ => arch}/riscv/vm-layout.rst | 0 .../hwlock/xuantie,th1520-hwspinlock.yaml | 34 + .../bindings/iio/adc/thead,th1520-adc.yaml | 52 + .../bindings/iio/adc/xuantie,th1520-adc.yaml | 52 + .../interrupt-controller/riscv,aplic.yaml | 172 + .../interrupt-controller/riscv,imsics.yaml | 172 + + .../sifive,plic-1.0.0.yaml | 1 + + .../thead,c900-aclint-sswi.yaml | 58 + + .../bindings/iommu/riscv,iommu.yaml | 147 + .../mailbox/xuantie-th1520-mailbox.txt | 57 + .../bindings/mmc/snps,dwcmshc-sdhci.yaml | 1 + .../devicetree/bindings/net/snps,dwmac.yaml | 2 + .../bindings/net/xuantie,dwmac.yaml | 77 + .../bindings/nvmem/xuantie,th1520-efuse.txt | 18 + .../pinctrl/thead,th1520-pinctrl.yaml | 374 + + .../pinctrl/ultrarisc,dp1000-pinctrl.yaml | 105 + .../bindings/pwm/xuantie,th1520-pwm.yaml | 44 + .../bindings/reset/xuantie,th1520-reset.yaml | 45 + .../devicetree/bindings/riscv/extensions.yaml | 6 + .../devicetree/bindings/rtc/xgene-rtc.txt | 16 + + .../bindings/serial/snps-dw-apb-uart.yaml | 4 + .../soc/xuantie/xuantie,th1520-event.yaml | 37 + .../bindings/sound/everest,es7210.txt | 12 + .../bindings/sound/everest,es8156.yaml | 42 + @@ -29,19 +46,36 @@ Signed-off-by: Mingzheng Xing .../bindings/spi/xuantie,th1520-qspi.yaml | 52 + .../bindings/spi/xuantie,th1520-spi.yaml | 58 + .../bindings/usb/xuantie,th1520-usb.yaml | 76 + + .../devicetree/bindings/vendor-prefixes.yaml | 2 + .../bindings/watchdog/xuantie,th1520-wdt.yaml | 19 + .../membarrier-sync-core/arch-support.txt | 18 +- + .../maintainer/maintainer-entry-profile.rst | 2 +- + Documentation/process/index.rst | 2 +- + Documentation/riscv/hwprobe.rst | 98 - Documentation/scheduler/index.rst | 1 + Documentation/scheduler/membarrier.rst | 39 + - MAINTAINERS | 18 + + .../it_IT/riscv/patch-acceptance.rst | 2 +- + .../translations/zh_CN/arch/index.rst | 2 +- + .../{ => arch}/riscv/boot-image-header.rst | 4 +- + .../zh_CN/{ => arch}/riscv/index.rst | 4 +- + .../{ => arch}/riscv/patch-acceptance.rst | 4 +- + .../zh_CN/{ => 
arch}/riscv/vm-layout.rst | 4 +- + .../maintainer/maintainer-entry-profile.rst | 2 +- + MAINTAINERS | 29 +- + arch/arm64/Kconfig | 1 - arch/arm64/include/asm/tlb.h | 5 +- + arch/arm64/kernel/pci.c | 191 - + arch/ia64/Kconfig | 1 - + arch/loongarch/Kconfig | 1 - arch/loongarch/include/asm/pgalloc.h | 1 + + arch/loongarch/kernel/dma.c | 9 +- arch/mips/include/asm/pgalloc.h | 1 + - arch/riscv/Kconfig | 41 +- - arch/riscv/Kconfig.socs | 12 + - arch/riscv/Makefile | 19 +- - arch/riscv/Makefile.isa | 18 + - arch/riscv/boot/dts/Makefile | 1 + + arch/riscv/Kconfig | 135 +- + arch/riscv/Kconfig.socs | 59 + + arch/riscv/Kconfig.vendor | 19 + + arch/riscv/Makefile | 23 +- + arch/riscv/Makefile.isa | 15 + + arch/riscv/boot/dts/Makefile | 3 + arch/riscv/boot/dts/sophgo/Makefile | 7 + .../riscv/boot/dts/sophgo/mango-2sockets.dtsi | 699 + .../boot/dts/sophgo/mango-clock-socket0.dtsi | 124 + @@ -63,6 +97,10 @@ Signed-off-by: Mingzheng Xing .../boot/dts/sophgo/mango-top-intc2.dtsi | 62 + .../boot/dts/sophgo/mango-yixin-s2110.dts | 63 + arch/riscv/boot/dts/sophgo/mango.dtsi | 938 + + arch/riscv/boot/dts/spacemit/Makefile | 2 + + .../boot/dts/spacemit/k1-bananapi-f3.dts | 448 + + arch/riscv/boot/dts/spacemit/k1-x.dtsi | 1221 ++ + .../riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi | 1192 ++ arch/riscv/boot/dts/thead/Makefile | 3 +- .../boot/dts/thead/th1520-beaglev-ahead.dts | 222 +- .../dts/thead/th1520-lichee-module-4a.dtsi | 440 +- @@ -71,45 +109,146 @@ Signed-off-by: Mingzheng Xing .../boot/dts/thead/th1520-lpi4a-dsi0.dts | 63 + .../boot/dts/thead/th1520-lpi4a-hx8279.dts | 63 + arch/riscv/boot/dts/thead/th1520.dtsi | 2048 +- - arch/riscv/configs/defconfig | 22 +- - arch/riscv/configs/openeuler_defconfig | 1913 +- + arch/riscv/boot/dts/ultrarisc/Makefile | 2 + + .../dts/ultrarisc/dp1000-evb-pinctrl.dtsi | 149 + + .../boot/dts/ultrarisc/dp1000-evb-v1.dts | 53 + + arch/riscv/boot/dts/ultrarisc/dp1000.dts | 533 + + arch/riscv/configs/defconfig | 23 +- + arch/riscv/configs/dp1000_defconfig | 5530 ++++++ + arch/riscv/configs/k1_defconfig | 31 + + arch/riscv/configs/openeuler_defconfig | 1968 +- arch/riscv/configs/sg2042_defconfig | 9 + arch/riscv/configs/th1520_defconfig | 470 + - arch/riscv/include/asm/barrier.h | 22 + - arch/riscv/include/asm/errata_list.h | 32 +- - arch/riscv/include/asm/hwcap.h | 1 + - arch/riscv/include/asm/io.h | 4 + + arch/riscv/errata/andes/errata.c | 13 +- + arch/riscv/errata/sifive/errata.c | 3 + + arch/riscv/errata/thead/errata.c | 3 + + arch/riscv/include/asm/acpi.h | 21 +- + arch/riscv/include/asm/arch_hweight.h | 78 + + arch/riscv/include/asm/archrandom.h | 72 + + arch/riscv/include/asm/atomic.h | 17 +- + arch/riscv/include/asm/barrier.h | 58 +- + arch/riscv/include/asm/bitops.h | 258 +- + arch/riscv/include/asm/cmpxchg.h | 496 +- + arch/riscv/include/asm/compat.h | 1 - + arch/riscv/include/asm/cpufeature-macros.h | 66 + + arch/riscv/include/asm/cpufeature.h | 69 + + arch/riscv/include/asm/csr.h | 13 + + arch/riscv/include/asm/dmi.h | 24 + + arch/riscv/include/asm/elf.h | 2 +- + arch/riscv/include/asm/errata_list.h | 45 +- + arch/riscv/include/asm/fence.h | 10 +- + arch/riscv/include/asm/hwcap.h | 141 +- + arch/riscv/include/asm/hwprobe.h | 26 +- + arch/riscv/include/asm/insn-def.h | 4 + + arch/riscv/include/asm/io.h | 12 +- + arch/riscv/include/asm/irq.h | 60 + + arch/riscv/include/asm/kvm_aia_aplic.h | 58 - + arch/riscv/include/asm/kvm_aia_imsic.h | 38 - arch/riscv/include/asm/membarrier.h | 19 + + arch/riscv/include/asm/mmio.h | 5 +- + arch/riscv/include/asm/mmiowb.h | 2 +- 
arch/riscv/include/asm/pgalloc.h | 53 +- arch/riscv/include/asm/pgtable-64.h | 14 +- - arch/riscv/include/asm/pgtable.h | 9 +- + arch/riscv/include/asm/pgtable.h | 21 +- + arch/riscv/include/asm/processor.h | 6 + arch/riscv/include/asm/sbi.h | 9 + arch/riscv/include/asm/sparsemem.h | 2 +- - arch/riscv/include/asm/switch_to.h | 15 + + arch/riscv/include/asm/suspend.h | 5 +- + arch/riscv/include/asm/switch_to.h | 17 +- arch/riscv/include/asm/sync_core.h | 29 + arch/riscv/include/asm/tlb.h | 18 + - arch/riscv/kernel/cpufeature.c | 1 + + arch/riscv/include/asm/vdso/processor.h | 8 +- + arch/riscv/include/asm/vector.h | 12 +- + arch/riscv/include/asm/vendor_extensions.h | 103 + + .../include/asm/vendor_extensions/andes.h | 19 + + arch/riscv/include/asm/vendorid_list.h | 2 +- + arch/riscv/include/uapi/asm/hwprobe.h | 52 +- + arch/riscv/kernel/Makefile | 4 + + arch/riscv/kernel/acpi.c | 135 +- + arch/riscv/kernel/acpi_numa.c | 130 + + arch/riscv/kernel/alternative.c | 2 +- + arch/riscv/kernel/cpufeature.c | 579 +- arch/riscv/kernel/module.c | 83 +- arch/riscv/kernel/process.c | 3 + arch/riscv/kernel/sbi-ipi.c | 46 +- - arch/riscv/kernel/suspend.c | 44 + - arch/riscv/kernel/vector.c | 3 +- + arch/riscv/kernel/setup.c | 8 +- + arch/riscv/kernel/smp.c | 17 + + arch/riscv/kernel/smpboot.c | 4 +- + arch/riscv/kernel/suspend.c | 100 +- + arch/riscv/kernel/sys_hwprobe.c | 349 + + arch/riscv/kernel/sys_riscv.c | 267 - + arch/riscv/kernel/vdso/hwprobe.c | 86 +- + arch/riscv/kernel/vector.c | 8 +- + arch/riscv/kernel/vendor_extensions.c | 56 + + arch/riscv/kernel/vendor_extensions/Makefile | 3 + + arch/riscv/kernel/vendor_extensions/andes.c | 18 + + arch/riscv/kvm/aia.c | 37 +- + arch/riscv/kvm/aia_aplic.c | 2 +- + arch/riscv/kvm/aia_device.c | 2 +- + arch/riscv/kvm/aia_imsic.c | 2 +- + arch/riscv/kvm/main.c | 2 +- + arch/riscv/kvm/tlb.c | 2 +- + arch/riscv/kvm/vcpu_fp.c | 2 +- + arch/riscv/kvm/vcpu_onereg.c | 2 +- + arch/riscv/kvm/vcpu_vector.c | 2 +- + arch/riscv/lib/Makefile | 1 + + arch/riscv/lib/crc32.c | 294 + + arch/riscv/mm/cacheflush.c | 25 +- arch/riscv/mm/dma-noncoherent.c | 9 +- arch/riscv/mm/pgtable.c | 2 + arch/riscv/mm/tlbflush.c | 31 + + arch/sw_64/Kconfig | 1 - arch/x86/include/asm/hw_irq.h | 2 - arch/x86/mm/pgtable.c | 3 + + drivers/acpi/Kconfig | 2 +- + drivers/acpi/Makefile | 2 +- + drivers/acpi/acpi_apd.c | 21 +- + drivers/acpi/acpi_lpss.c | 15 +- + drivers/acpi/arm64/dma.c | 17 +- + drivers/acpi/arm64/iort.c | 20 +- + drivers/acpi/bus.c | 4 + + drivers/acpi/internal.h | 8 + + drivers/acpi/mipi-disco-img.c | 292 + + drivers/acpi/numa/Kconfig | 5 +- + drivers/acpi/numa/srat.c | 34 +- + drivers/acpi/pci_link.c | 2 + + drivers/acpi/pci_mcfg.c | 17 + + drivers/acpi/riscv/Makefile | 4 +- + drivers/acpi/riscv/cppc.c | 157 + + drivers/acpi/riscv/cpuidle.c | 81 + + drivers/acpi/riscv/init.c | 13 + + drivers/acpi/riscv/init.h | 4 + + drivers/acpi/riscv/irq.c | 335 + + drivers/acpi/riscv/rhct.c | 93 +- + drivers/acpi/scan.c | 151 +- + drivers/acpi/thermal.c | 56 +- + drivers/acpi/utils.c | 138 +- + drivers/base/arch_numa.c | 2 +- drivers/base/platform-msi.c | 149 +- drivers/char/ipmi/ipmi_si_hardcode.c | 26 +- drivers/char/ipmi/ipmi_si_intf.c | 3 +- drivers/char/ipmi/ipmi_si_pci.c | 6 + - drivers/clk/Kconfig | 1 + - drivers/clk/Makefile | 2 + + drivers/clk/Kconfig | 2 + + drivers/clk/Makefile | 3 + drivers/clk/sophgo/Makefile | 3 + - drivers/clk/sophgo/clk-dummy.c | 600 + + drivers/clk/sophgo/clk-dummy.c | 594 + drivers/clk/sophgo/clk-mango.c | 977 + - drivers/clk/sophgo/clk.c | 883 + + 
drivers/clk/sophgo/clk.c | 881 + drivers/clk/sophgo/clk.h | 152 + + drivers/clk/spacemit/Kconfig | 9 + + drivers/clk/spacemit/Makefile | 11 + + drivers/clk/spacemit/ccu-spacemit-k1x.c | 2123 ++ + drivers/clk/spacemit/ccu-spacemit-k1x.h | 81 + + drivers/clk/spacemit/ccu_ddn.c | 161 + + drivers/clk/spacemit/ccu_ddn.h | 86 + + drivers/clk/spacemit/ccu_ddr.c | 272 + + drivers/clk/spacemit/ccu_ddr.h | 44 + + drivers/clk/spacemit/ccu_dpll.c | 124 + + drivers/clk/spacemit/ccu_dpll.h | 76 + + drivers/clk/spacemit/ccu_mix.c | 502 + + drivers/clk/spacemit/ccu_mix.h | 380 + + drivers/clk/spacemit/ccu_pll.c | 286 + + drivers/clk/spacemit/ccu_pll.h | 79 + drivers/clk/xuantie/Kconfig | 12 + drivers/clk/xuantie/Makefile | 7 + drivers/clk/xuantie/clk-th1520-fm.c | 646 + @@ -124,21 +263,33 @@ Signed-off-by: Mingzheng Xing drivers/clk/xuantie/gate/vosys-gate.c | 111 + drivers/clk/xuantie/gate/vpsys-gate.c | 99 + drivers/clk/xuantie/gate/xuantie-gate.c | 114 + - drivers/cpufreq/Kconfig | 9 + + drivers/clocksource/timer-riscv.c | 6 +- + drivers/cpufreq/Kconfig | 38 + + drivers/cpufreq/Kconfig.arm | 26 - drivers/cpufreq/Makefile | 1 + - drivers/cpufreq/th1520-cpufreq.c | 584 + + drivers/cpufreq/th1520-cpufreq.c | 588 + + drivers/cpuidle/cpuidle-riscv-sbi.c | 49 +- + drivers/dma/Kconfig | 7 + + drivers/dma/Makefile | 1 + .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 106 +- drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 11 + drivers/dma/mv_xor_v2.c | 8 +- drivers/dma/qcom/hidma.c | 6 +- - drivers/firmware/Kconfig | 1 + + drivers/dma/spacemit-k1-dma.c | 1515 ++ + drivers/firmware/Kconfig | 3 +- drivers/firmware/Makefile | 1 + + drivers/firmware/efi/libstub/Makefile | 2 +- + drivers/firmware/efi/riscv-runtime.c | 13 + + drivers/firmware/qemu_fw_cfg.c | 2 +- drivers/firmware/xuantie/Kconfig | 23 + drivers/firmware/xuantie/Makefile | 4 + drivers/firmware/xuantie/th1520_aon.c | 341 + drivers/firmware/xuantie/th1520_aon_pd.c | 414 + drivers/firmware/xuantie/th1520_proc_debug.c | 173 + + drivers/gpio/Kconfig | 9 + + drivers/gpio/Makefile | 1 + drivers/gpio/gpio-dwapb.c | 15 +- + drivers/gpio/gpio-k1x.c | 407 + drivers/gpio/gpio-pca953x.c | 12 +- drivers/gpu/drm/Kconfig | 4 + drivers/gpu/drm/Makefile | 2 + @@ -780,36 +931,80 @@ Signed-off-by: Mingzheng Xing drivers/hwspinlock/Kconfig | 8 + drivers/hwspinlock/Makefile | 1 + drivers/hwspinlock/th1520_hwspinlock.c | 129 + - drivers/i2c/busses/Makefile | 1 + + drivers/i2c/busses/Kconfig | 8 + + drivers/i2c/busses/Makefile | 2 + drivers/i2c/busses/i2c-designware-common.c | 27 + drivers/i2c/busses/i2c-designware-core.h | 22 +- drivers/i2c/busses/i2c-designware-master.c | 77 +- .../i2c/busses/i2c-designware-master_dma.c | 348 + .../i2c/busses/i2c-designware-master_dma.h | 6 + - drivers/i2c/busses/i2c-designware-platdrv.c | 2 + - drivers/iio/adc/Kconfig | 13 + - drivers/iio/adc/Makefile | 1 + + drivers/i2c/busses/i2c-designware-platdrv.c | 3 + + drivers/i2c/busses/i2c-spacemit-k1.c | 1299 ++ + drivers/i2c/busses/i2c-spacemit-k1.h | 225 + + drivers/iio/adc/Kconfig | 23 + + drivers/iio/adc/Makefile | 2 + + drivers/iio/adc/spacemit-p1-adc.c | 278 + drivers/iio/adc/th1520-adc.c | 573 + drivers/iio/adc/th1520-adc.h | 192 + - drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 5 +- - drivers/irqchip/Kconfig | 25 + - drivers/irqchip/Makefile | 3 + - drivers/irqchip/irq-riscv-aplic-direct.c | 323 + - drivers/irqchip/irq-riscv-aplic-main.c | 211 + - drivers/irqchip/irq-riscv-aplic-main.h | 52 + - drivers/irqchip/irq-riscv-aplic-msi.c | 278 + - drivers/irqchip/irq-riscv-imsic-early.c | 201 + - 
drivers/irqchip/irq-riscv-imsic-platform.c | 375 + - drivers/irqchip/irq-riscv-imsic-state.c | 865 + + drivers/input/misc/Kconfig | 10 + + drivers/input/misc/Makefile | 1 + + drivers/input/misc/spacemit-p1-pwrkey.c | 211 + + drivers/iommu/Kconfig | 1 + + drivers/iommu/Makefile | 2 +- + drivers/iommu/apple-dart.c | 3 +- + drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 8 +- + drivers/iommu/arm/arm-smmu/arm-smmu.c | 3 +- + drivers/iommu/arm/arm-smmu/qcom_iommu.c | 3 +- + drivers/iommu/exynos-iommu.c | 2 +- + drivers/iommu/intel/dmar.c | 16 +- + drivers/iommu/intel/iommu.c | 47 +- + drivers/iommu/intel/iommu.h | 2 - + drivers/iommu/intel/irq_remapping.c | 16 +- + drivers/iommu/intel/pasid.c | 18 +- + drivers/iommu/intel/svm.c | 11 +- + drivers/iommu/iommu-pages.h | 154 + + drivers/iommu/iommu.c | 2 +- + drivers/iommu/ipmmu-vmsa.c | 4 +- + drivers/iommu/msm_iommu.c | 4 +- + drivers/iommu/mtk_iommu.c | 3 +- + drivers/iommu/mtk_iommu_v1.c | 3 +- + drivers/iommu/riscv/Kconfig | 20 + + drivers/iommu/riscv/Makefile | 3 + + drivers/iommu/riscv/iommu-bits.h | 784 + + drivers/iommu/riscv/iommu-pci.c | 120 + + drivers/iommu/riscv/iommu-platform.c | 92 + + drivers/iommu/riscv/iommu.c | 1661 ++ + drivers/iommu/riscv/iommu.h | 88 + + drivers/iommu/rockchip-iommu.c | 2 +- + drivers/iommu/sprd-iommu.c | 3 +- + drivers/iommu/sun50i-iommu.c | 2 +- + drivers/iommu/tegra-smmu.c | 4 +- + drivers/iommu/virtio-iommu.c | 3 +- + drivers/irqchip/Kconfig | 45 + + drivers/irqchip/Makefile | 5 + + drivers/irqchip/irq-riscv-aplic-direct.c | 329 + + drivers/irqchip/irq-riscv-aplic-main.c | 234 + + drivers/irqchip/irq-riscv-aplic-main.h | 53 + + drivers/irqchip/irq-riscv-aplic-msi.c | 285 + + drivers/irqchip/irq-riscv-imsic-early.c | 263 + + drivers/irqchip/irq-riscv-imsic-platform.c | 395 + + drivers/irqchip/irq-riscv-imsic-state.c | 891 + drivers/irqchip/irq-riscv-imsic-state.h | 108 + - drivers/irqchip/irq-riscv-intc.c | 45 +- + drivers/irqchip/irq-riscv-intc.c | 152 +- + drivers/irqchip/irq-sg2044-msi.c | 403 + + drivers/irqchip/irq-sifive-plic.c | 451 +- + drivers/irqchip/irq-thead-c900-aclint-sswi.c | 351 + drivers/mailbox/Kconfig | 8 + drivers/mailbox/Makefile | 2 + drivers/mailbox/bcm-flexrm-mailbox.c | 8 +- drivers/mailbox/th1520-mailbox.c | 614 + - drivers/mmc/host/Kconfig | 14 + - drivers/mmc/host/Makefile | 1 + + drivers/mfd/Kconfig | 12 + + drivers/mfd/Makefile | 2 + + drivers/mfd/spacemit-p1.c | 481 + + drivers/mmc/host/Kconfig | 25 + + drivers/mmc/host/Makefile | 2 + drivers/mmc/host/sdhci-of-dwcmshc.c | 649 + + drivers/mmc/host/sdhci-of-k1.c | 1475 ++ drivers/mmc/host/sdhci-sophgo.c | 619 + drivers/mmc/host/sdhci-sophgo.h | 121 + drivers/mmc/host/sdhci.c | 12 +- @@ -818,11 +1013,18 @@ Signed-off-by: Mingzheng Xing drivers/mtd/spi-nor/controllers/Makefile | 1 + .../mtd/spi-nor/controllers/sophgo-spifmc.c | 445 + drivers/mtd/spi-nor/gigadevice.c | 14 + - drivers/net/ethernet/intel/i40e/i40e_common.c | 3 +- - drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2 +- - drivers/net/ethernet/stmicro/stmmac/Kconfig | 18 + - drivers/net/ethernet/stmicro/stmmac/Makefile | 2 + + drivers/net/ethernet/Kconfig | 1 + + drivers/net/ethernet/Makefile | 1 + + drivers/net/ethernet/intel/i40e/i40e_common.c | 6 +- + .../net/ethernet/intel/ixgbe/ixgbe_common.c | 6 +- + drivers/net/ethernet/spacemit/Kconfig | 24 + + drivers/net/ethernet/spacemit/Makefile | 6 + + drivers/net/ethernet/spacemit/k1-emac.c | 2739 +++ + drivers/net/ethernet/spacemit/k1-emac.h | 727 + + drivers/net/ethernet/stmicro/stmmac/Kconfig | 29 + + 
drivers/net/ethernet/stmicro/stmmac/Makefile | 3 + .../ethernet/stmicro/stmmac/dwmac-sophgo.c | 268 + + .../ethernet/stmicro/stmmac/dwmac-ultrarisc.c | 69 + .../ethernet/stmicro/stmmac/dwmac-xuantie.c | 584 + .../net/ethernet/stmicro/stmmac/dwmac_lib.c | 2 +- drivers/net/wireless/Kconfig | 2 +- @@ -962,49 +1164,76 @@ Signed-off-by: Mingzheng Xing drivers/nvmem/Kconfig | 10 + drivers/nvmem/Makefile | 2 + drivers/nvmem/th1520-efuse.c | 1197 ++ + drivers/of/device.c | 42 +- drivers/pci/controller/cadence/Kconfig | 11 + drivers/pci/controller/cadence/Makefile | 1 + - .../controller/cadence/pcie-cadence-sophgo.c | 973 + - .../controller/cadence/pcie-cadence-sophgo.h | 17 + - drivers/pci/msi/msi.c | 97 +- + .../controller/cadence/pcie-cadence-sophgo.c | 936 + + .../controller/cadence/pcie-cadence-sophgo.h | 6 + + drivers/pci/controller/dwc/Kconfig | 26 + + drivers/pci/controller/dwc/Makefile | 2 + + .../pci/controller/dwc/pcie-designware-host.c | 86 + + drivers/pci/controller/dwc/pcie-designware.h | 39 + + drivers/pci/controller/dwc/pcie-dw-sophgo.c | 1687 ++ + drivers/pci/controller/dwc/pcie-dw-sophgo.h | 251 + + drivers/pci/controller/dwc/pcie-ultrarisc.c | 139 + + drivers/pci/msi/msi.c | 61 +- + drivers/pci/pci-acpi.c | 248 +- + drivers/pci/pci.h | 4 +- drivers/pci/pcie/portdrv.c | 2 +- + drivers/perf/Kconfig | 14 + drivers/perf/arm_smmuv3_pmu.c | 4 +- + drivers/perf/riscv_pmu_sbi.c | 44 +- drivers/phy/Kconfig | 1 + drivers/phy/Makefile | 3 +- drivers/phy/synopsys/Kconfig | 13 + drivers/phy/synopsys/Makefile | 3 + drivers/phy/synopsys/phy-dw-mipi-dphy.c | 824 + - drivers/pinctrl/Kconfig | 11 +- - drivers/pinctrl/Makefile | 2 + + drivers/pinctrl/Kconfig | 37 +- + drivers/pinctrl/Makefile | 5 + + drivers/pinctrl/pinctrl-spacemit-k1x.c | 2101 ++ + drivers/pinctrl/pinctrl-spacemit-p1.c | 631 + drivers/pinctrl/pinctrl-th1520.c | 1180 ++ drivers/pinctrl/sophgo/Makefile | 2 + drivers/pinctrl/sophgo/pinctrl-mango.c | 453 + drivers/pinctrl/sophgo/pinctrl-sophgo.c | 292 + drivers/pinctrl/sophgo/pinctrl-sophgo.h | 70 + - drivers/pwm/Kconfig | 11 + + drivers/pinctrl/ultrarisc/Kconfig | 23 + + drivers/pinctrl/ultrarisc/Makefile | 4 + + .../ultrarisc/pinctrl-ultrarisc-dp1000.c | 122 + + drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c | 499 + + drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h | 77 + + .../platform/surface/surface_acpi_notify.c | 14 +- + drivers/pwm/Kconfig | 13 +- drivers/pwm/Makefile | 2 + + drivers/pwm/pwm-pxa.c | 22 +- drivers/pwm/pwm-sophgo.c | 276 + drivers/pwm/pwm-xuantie.c | 270 + - drivers/regulator/Kconfig | 10 + - drivers/regulator/Makefile | 1 + + drivers/regulator/Kconfig | 17 + + drivers/regulator/Makefile | 2 + + drivers/regulator/spacemit-p1-regulator.c | 268 + drivers/regulator/th1520-aon-regulator.c | 770 + - drivers/reset/Kconfig | 10 + - drivers/reset/Makefile | 2 + + drivers/reset/Kconfig | 16 + + drivers/reset/Makefile | 3 + drivers/reset/reset-sophgo.c | 163 + + drivers/reset/reset-spacemit-k1x.c | 669 + drivers/reset/reset-th1520.c | 170 + drivers/rpmsg/Kconfig | 4 + drivers/rpmsg/Makefile | 1 + drivers/rpmsg/th1520_rpmsg.c | 958 + - drivers/rtc/Kconfig | 18 +- - drivers/rtc/Makefile | 1 + + drivers/rtc/Kconfig | 28 +- + drivers/rtc/Makefile | 2 + drivers/rtc/rtc-astbmc.c | 535 + + drivers/rtc/rtc-spacemit-p1.c | 716 + drivers/rtc/rtc-xgene.c | 32 + - drivers/soc/Kconfig | 1 + - drivers/soc/Makefile | 2 + + drivers/soc/Kconfig | 2 + + drivers/soc/Makefile | 3 + drivers/soc/sophgo/Makefile | 3 + drivers/soc/sophgo/tach/sophgo-tach.c | 330 + drivers/soc/sophgo/top/top_intc.c 
| 412 + drivers/soc/sophgo/umcu/mcu.c | 1144 ++ + drivers/soc/spacemit/Kconfig | 13 + + drivers/soc/spacemit/Makefile | 1 + + drivers/soc/spacemit/spacemit-mem-range.c | 39 + drivers/soc/xuantie/Kconfig | 34 + drivers/soc/xuantie/Makefile | 13 + drivers/soc/xuantie/nna/GPLHEADER | 356 + @@ -1203,19 +1432,29 @@ Signed-off-by: Mingzheng Xing .../linux/kernel_module/vcmdswhwregisters.c | 180 + .../linux/kernel_module/vcmdswhwregisters.h | 244 + .../linux/kernel_module/venc_trace_point.h | 43 + - drivers/spi/Kconfig | 6 + - drivers/spi/Makefile | 1 + + drivers/spi/Kconfig | 21 + + drivers/spi/Makefile | 3 + drivers/spi/spi-dw-mmio-quad.c | 216 + + drivers/spi/spi-dw-mmio.c | 1 + drivers/spi/spi-dw-quad.c | 830 + drivers/spi/spi-dw-quad.h | 365 + + drivers/spi/spi-spacemit-k1-qspi.c | 1572 ++ + drivers/spi/spi-spacemit-k1.c | 1281 ++ + drivers/spi/spi-spacemit-k1.h | 281 + drivers/spi/spidev.c | 40 + drivers/tee/Kconfig | 2 +- drivers/tee/optee/Kconfig | 2 +- drivers/tee/optee/call.c | 2 + drivers/tee/optee/smc_abi.c | 37 + drivers/tty/serial/8250/8250_dma.c | 134 +- - drivers/tty/serial/8250/8250_dw.c | 2 +- + drivers/tty/serial/8250/8250_dw.c | 167 +- + drivers/tty/serial/8250/8250_dwlib.c | 3 +- + drivers/tty/serial/8250/8250_dwlib.h | 33 +- drivers/tty/serial/8250/8250_port.c | 12 +- + drivers/tty/serial/Kconfig | 19 +- + drivers/tty/serial/Makefile | 1 + + drivers/tty/serial/serial_port.c | 145 + + drivers/tty/serial/spacemit_k1x_uart.c | 1979 ++ drivers/ufs/host/ufs-qcom.c | 9 +- drivers/usb/dwc3/Kconfig | 20 + drivers/usb/dwc3/Makefile | 2 + @@ -1225,10 +1464,13 @@ Signed-off-by: Mingzheng Xing drivers/watchdog/Makefile | 1 + drivers/watchdog/dw_wdt.c | 13 +- drivers/watchdog/th1520_wdt.c | 393 + + include/acpi/acpi_bus.h | 37 +- + include/acpi/actbl3.h | 18 +- include/asm-generic/pgalloc.h | 7 +- include/drm/bridge/dw_hdmi.h | 5 + .../dt-bindings/clock/sophgo-mango-clock.h | 165 + include/dt-bindings/clock/sophgo.h | 15 + + .../dt-bindings/clock/spacemit-k1x-clock.h | 223 + include/dt-bindings/clock/th1520-audiosys.h | 35 + include/dt-bindings/clock/th1520-dspsys.h | 33 + .../dt-bindings/clock/th1520-fm-ap-clock.h | 513 + @@ -1236,23 +1478,39 @@ Signed-off-by: Mingzheng Xing include/dt-bindings/clock/th1520-visys.h | 54 + include/dt-bindings/clock/th1520-vosys.h | 41 + include/dt-bindings/clock/th1520-vpsys.h | 26 + + include/dt-bindings/dma/spacemit-k1-dma.h | 54 + include/dt-bindings/firmware/xuantie/rsrc.h | 18 + + include/dt-bindings/mmc/spacemit-k1-sdhci.h | 62 + + include/dt-bindings/pinctrl/k1-x-pinctrl.h | 198 + + .../dt-bindings/pinctrl/ur-dp1000-pinctrl.h | 64 + .../dt-bindings/reset/sophgo-mango-resets.h | 96 + + .../dt-bindings/reset/spacemit-k1x-reset.h | 126 + .../dt-bindings/reset/xuantie,th1520-reset.h | 28 + .../dt-bindings/soc/th1520_system_status.h | 38 + .../dt-bindings/soc/xuantie,th1520-iopmp.h | 41 + - include/linux/cpuhotplug.h | 2 + + include/linux/acpi.h | 15 + + include/linux/acpi_iort.h | 4 +- + include/linux/cpuhotplug.h | 3 + include/linux/cpumask.h | 17 + + include/linux/crc32.h | 3 + + include/linux/dma-direct.h | 18 + include/linux/find.h | 27 + include/linux/firmware/xuantie/ipc.h | 167 + include/linux/firmware/xuantie/th1520_event.h | 35 + + include/linux/iommu.h | 4 +- include/linux/irqchip/riscv-aplic.h | 145 + - include/linux/irqchip/riscv-imsic.h | 87 + + include/linux/irqchip/riscv-imsic.h | 96 + include/linux/irqdomain.h | 17 + include/linux/irqdomain_defs.h | 2 + + include/linux/mfd/spacemit_p1.h | 250 + include/linux/mlx4/device.h | 2 +- 
include/linux/mm.h | 16 + include/linux/msi.h | 28 +- + include/linux/pci-ecam.h | 1 + + .../linux/platform_data/spacemit_k1_sdhci.h | 99 + + include/linux/serial_core.h | 2 + + include/linux/sizes.h | 9 + + include/linux/string_choices.h | 11 + include/linux/sync_core.h | 16 +- include/linux/th1520_proc_debug.h | 13 + include/linux/th1520_rpmsg.h | 99 + @@ -1312,16 +1570,38 @@ Signed-off-by: Mingzheng Xing .../riscv/thead/c900-legacy/microarch.json | 80 + .../arch/riscv/thead/th1520-ddr/metrics.json | 713 + .../thead/th1520-ddr/uncore-ddr-pmu.json | 1550 ++ - 1307 files changed, 559540 insertions(+), 728 deletions(-) + tools/testing/selftests/hid/Makefile | 10 +- + tools/testing/selftests/hid/progs/hid.c | 3 - + .../selftests/hid/progs/hid_bpf_helpers.h | 77 + + .../testing/selftests/riscv/hwprobe/Makefile | 9 +- + tools/testing/selftests/riscv/hwprobe/cbo.c | 228 + + .../testing/selftests/riscv/hwprobe/hwprobe.c | 64 +- + .../testing/selftests/riscv/hwprobe/hwprobe.h | 15 + + .../selftests/riscv/vector/vstate_prctl.c | 10 +- + 1573 files changed, 606683 insertions(+), 2584 deletions(-) + create mode 100644 .github/workflows/main.yml + rename Documentation/{ => arch}/riscv/acpi.rst (100%) + rename Documentation/{ => arch}/riscv/boot-image-header.rst (100%) + rename Documentation/{ => arch}/riscv/boot.rst (100%) + rename Documentation/{ => arch}/riscv/features.rst (100%) + create mode 100644 Documentation/arch/riscv/hwprobe.rst + rename Documentation/{ => arch}/riscv/index.rst (100%) + rename Documentation/{ => arch}/riscv/patch-acceptance.rst (100%) + rename Documentation/{ => arch}/riscv/uabi.rst (100%) + rename Documentation/{ => arch}/riscv/vector.rst (100%) + rename Documentation/{ => arch}/riscv/vm-layout.rst (100%) create mode 100644 Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml create mode 100644 Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml create mode 100644 Documentation/devicetree/bindings/iio/adc/xuantie,th1520-adc.yaml create mode 100644 Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml create mode 100644 Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml + create mode 100644 Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml + create mode 100644 Documentation/devicetree/bindings/iommu/riscv,iommu.yaml create mode 100644 Documentation/devicetree/bindings/mailbox/xuantie-th1520-mailbox.txt create mode 100644 Documentation/devicetree/bindings/net/xuantie,dwmac.yaml create mode 100644 Documentation/devicetree/bindings/nvmem/xuantie,th1520-efuse.txt create mode 100644 Documentation/devicetree/bindings/pinctrl/thead,th1520-pinctrl.yaml + create mode 100644 Documentation/devicetree/bindings/pinctrl/ultrarisc,dp1000-pinctrl.yaml create mode 100644 Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml create mode 100644 Documentation/devicetree/bindings/reset/xuantie,th1520-reset.yaml create mode 100644 Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml @@ -1334,7 +1614,13 @@ Signed-off-by: Mingzheng Xing create mode 100644 Documentation/devicetree/bindings/spi/xuantie,th1520-spi.yaml create mode 100644 Documentation/devicetree/bindings/usb/xuantie,th1520-usb.yaml create mode 100644 Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml + delete mode 100644 Documentation/riscv/hwprobe.rst create mode 100644 Documentation/scheduler/membarrier.rst + rename Documentation/translations/zh_CN/{ => arch}/riscv/boot-image-header.rst 
(96%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/index.rst (79%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/patch-acceptance.rst (93%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/vm-layout.rst (98%) + create mode 100644 arch/riscv/Kconfig.vendor create mode 100644 arch/riscv/Makefile.isa create mode 100644 arch/riscv/boot/dts/sophgo/Makefile create mode 100644 arch/riscv/boot/dts/sophgo/mango-2sockets.dtsi @@ -1357,17 +1643,61 @@ Signed-off-by: Mingzheng Xing create mode 100644 arch/riscv/boot/dts/sophgo/mango-top-intc2.dtsi create mode 100644 arch/riscv/boot/dts/sophgo/mango-yixin-s2110.dts create mode 100644 arch/riscv/boot/dts/sophgo/mango.dtsi + create mode 100644 arch/riscv/boot/dts/spacemit/Makefile + create mode 100644 arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts + create mode 100644 arch/riscv/boot/dts/spacemit/k1-x.dtsi + create mode 100644 arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi create mode 100644 arch/riscv/boot/dts/thead/th1520-lichee-pi-4a-16g.dts create mode 100644 arch/riscv/boot/dts/thead/th1520-lpi4a-dsi0.dts create mode 100644 arch/riscv/boot/dts/thead/th1520-lpi4a-hx8279.dts + create mode 100644 arch/riscv/boot/dts/ultrarisc/Makefile + create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000-evb-pinctrl.dtsi + create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts + create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000.dts + create mode 100644 arch/riscv/configs/dp1000_defconfig + create mode 100644 arch/riscv/configs/k1_defconfig create mode 100644 arch/riscv/configs/sg2042_defconfig create mode 100644 arch/riscv/configs/th1520_defconfig + create mode 100644 arch/riscv/include/asm/arch_hweight.h + create mode 100644 arch/riscv/include/asm/archrandom.h + create mode 100644 arch/riscv/include/asm/cpufeature-macros.h + create mode 100644 arch/riscv/include/asm/dmi.h + delete mode 100644 arch/riscv/include/asm/kvm_aia_aplic.h + delete mode 100644 arch/riscv/include/asm/kvm_aia_imsic.h create mode 100644 arch/riscv/include/asm/sync_core.h + create mode 100644 arch/riscv/include/asm/vendor_extensions.h + create mode 100644 arch/riscv/include/asm/vendor_extensions/andes.h + create mode 100644 arch/riscv/kernel/acpi_numa.c + create mode 100644 arch/riscv/kernel/sys_hwprobe.c + create mode 100644 arch/riscv/kernel/vendor_extensions.c + create mode 100644 arch/riscv/kernel/vendor_extensions/Makefile + create mode 100644 arch/riscv/kernel/vendor_extensions/andes.c + create mode 100644 arch/riscv/lib/crc32.c + create mode 100644 drivers/acpi/mipi-disco-img.c + create mode 100644 drivers/acpi/riscv/cppc.c + create mode 100644 drivers/acpi/riscv/cpuidle.c + create mode 100644 drivers/acpi/riscv/init.c + create mode 100644 drivers/acpi/riscv/init.h + create mode 100644 drivers/acpi/riscv/irq.c create mode 100644 drivers/clk/sophgo/Makefile create mode 100644 drivers/clk/sophgo/clk-dummy.c create mode 100644 drivers/clk/sophgo/clk-mango.c create mode 100644 drivers/clk/sophgo/clk.c create mode 100644 drivers/clk/sophgo/clk.h + create mode 100644 drivers/clk/spacemit/Kconfig + create mode 100644 drivers/clk/spacemit/Makefile + create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1x.c + create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1x.h + create mode 100644 drivers/clk/spacemit/ccu_ddn.c + create mode 100644 drivers/clk/spacemit/ccu_ddn.h + create mode 100644 drivers/clk/spacemit/ccu_ddr.c + create mode 100644 drivers/clk/spacemit/ccu_ddr.h + create mode 100644 drivers/clk/spacemit/ccu_dpll.c + create mode 
100644 drivers/clk/spacemit/ccu_dpll.h + create mode 100644 drivers/clk/spacemit/ccu_mix.c + create mode 100644 drivers/clk/spacemit/ccu_mix.h + create mode 100644 drivers/clk/spacemit/ccu_pll.c + create mode 100644 drivers/clk/spacemit/ccu_pll.h create mode 100644 drivers/clk/xuantie/Kconfig create mode 100644 drivers/clk/xuantie/Makefile create mode 100644 drivers/clk/xuantie/clk-th1520-fm.c @@ -1383,11 +1713,13 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/clk/xuantie/gate/vpsys-gate.c create mode 100644 drivers/clk/xuantie/gate/xuantie-gate.c create mode 100644 drivers/cpufreq/th1520-cpufreq.c + create mode 100644 drivers/dma/spacemit-k1-dma.c create mode 100644 drivers/firmware/xuantie/Kconfig create mode 100644 drivers/firmware/xuantie/Makefile create mode 100644 drivers/firmware/xuantie/th1520_aon.c create mode 100644 drivers/firmware/xuantie/th1520_aon_pd.c create mode 100644 drivers/firmware/xuantie/th1520_proc_debug.c + create mode 100644 drivers/gpio/gpio-k1x.c create mode 100644 drivers/gpu/drm/img-rogue/Kconfig create mode 100644 drivers/gpu/drm/img-rogue/Makefile create mode 100644 drivers/gpu/drm/img-rogue/allocmem.c @@ -2003,8 +2335,20 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/hwspinlock/th1520_hwspinlock.c create mode 100644 drivers/i2c/busses/i2c-designware-master_dma.c create mode 100644 drivers/i2c/busses/i2c-designware-master_dma.h + create mode 100644 drivers/i2c/busses/i2c-spacemit-k1.c + create mode 100644 drivers/i2c/busses/i2c-spacemit-k1.h + create mode 100644 drivers/iio/adc/spacemit-p1-adc.c create mode 100644 drivers/iio/adc/th1520-adc.c create mode 100644 drivers/iio/adc/th1520-adc.h + create mode 100644 drivers/input/misc/spacemit-p1-pwrkey.c + create mode 100644 drivers/iommu/iommu-pages.h + create mode 100644 drivers/iommu/riscv/Kconfig + create mode 100644 drivers/iommu/riscv/Makefile + create mode 100644 drivers/iommu/riscv/iommu-bits.h + create mode 100644 drivers/iommu/riscv/iommu-pci.c + create mode 100644 drivers/iommu/riscv/iommu-platform.c + create mode 100644 drivers/iommu/riscv/iommu.c + create mode 100644 drivers/iommu/riscv/iommu.h create mode 100644 drivers/irqchip/irq-riscv-aplic-direct.c create mode 100644 drivers/irqchip/irq-riscv-aplic-main.c create mode 100644 drivers/irqchip/irq-riscv-aplic-main.h @@ -2013,11 +2357,20 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/irqchip/irq-riscv-imsic-platform.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.h + create mode 100644 drivers/irqchip/irq-sg2044-msi.c + create mode 100644 drivers/irqchip/irq-thead-c900-aclint-sswi.c create mode 100644 drivers/mailbox/th1520-mailbox.c + create mode 100644 drivers/mfd/spacemit-p1.c + create mode 100644 drivers/mmc/host/sdhci-of-k1.c create mode 100644 drivers/mmc/host/sdhci-sophgo.c create mode 100644 drivers/mmc/host/sdhci-sophgo.h create mode 100644 drivers/mtd/spi-nor/controllers/sophgo-spifmc.c + create mode 100644 drivers/net/ethernet/spacemit/Kconfig + create mode 100644 drivers/net/ethernet/spacemit/Makefile + create mode 100644 drivers/net/ethernet/spacemit/k1-emac.c + create mode 100644 drivers/net/ethernet/spacemit/k1-emac.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c + create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-ultrarisc.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c create mode 100644 drivers/net/wireless/aic8800/Kconfig create mode 100644 
drivers/net/wireless/aic8800/Makefile @@ -2154,25 +2507,41 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/nvmem/th1520-efuse.c create mode 100644 drivers/pci/controller/cadence/pcie-cadence-sophgo.c create mode 100644 drivers/pci/controller/cadence/pcie-cadence-sophgo.h + create mode 100644 drivers/pci/controller/dwc/pcie-dw-sophgo.c + create mode 100644 drivers/pci/controller/dwc/pcie-dw-sophgo.h + create mode 100644 drivers/pci/controller/dwc/pcie-ultrarisc.c create mode 100644 drivers/phy/synopsys/Kconfig create mode 100644 drivers/phy/synopsys/Makefile create mode 100644 drivers/phy/synopsys/phy-dw-mipi-dphy.c + create mode 100644 drivers/pinctrl/pinctrl-spacemit-k1x.c + create mode 100644 drivers/pinctrl/pinctrl-spacemit-p1.c create mode 100644 drivers/pinctrl/pinctrl-th1520.c create mode 100644 drivers/pinctrl/sophgo/Makefile create mode 100644 drivers/pinctrl/sophgo/pinctrl-mango.c create mode 100644 drivers/pinctrl/sophgo/pinctrl-sophgo.c create mode 100644 drivers/pinctrl/sophgo/pinctrl-sophgo.h + create mode 100644 drivers/pinctrl/ultrarisc/Kconfig + create mode 100644 drivers/pinctrl/ultrarisc/Makefile + create mode 100644 drivers/pinctrl/ultrarisc/pinctrl-ultrarisc-dp1000.c + create mode 100644 drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c + create mode 100644 drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h create mode 100644 drivers/pwm/pwm-sophgo.c create mode 100644 drivers/pwm/pwm-xuantie.c + create mode 100644 drivers/regulator/spacemit-p1-regulator.c create mode 100644 drivers/regulator/th1520-aon-regulator.c create mode 100644 drivers/reset/reset-sophgo.c + create mode 100644 drivers/reset/reset-spacemit-k1x.c create mode 100644 drivers/reset/reset-th1520.c create mode 100644 drivers/rpmsg/th1520_rpmsg.c create mode 100644 drivers/rtc/rtc-astbmc.c + create mode 100644 drivers/rtc/rtc-spacemit-p1.c create mode 100644 drivers/soc/sophgo/Makefile create mode 100644 drivers/soc/sophgo/tach/sophgo-tach.c create mode 100644 drivers/soc/sophgo/top/top_intc.c create mode 100644 drivers/soc/sophgo/umcu/mcu.c + create mode 100644 drivers/soc/spacemit/Kconfig + create mode 100644 drivers/soc/spacemit/Makefile + create mode 100644 drivers/soc/spacemit/spacemit-mem-range.c create mode 100644 drivers/soc/xuantie/Kconfig create mode 100644 drivers/soc/xuantie/Makefile create mode 100644 drivers/soc/xuantie/nna/GPLHEADER @@ -2374,10 +2743,15 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/spi/spi-dw-mmio-quad.c create mode 100644 drivers/spi/spi-dw-quad.c create mode 100644 drivers/spi/spi-dw-quad.h + create mode 100644 drivers/spi/spi-spacemit-k1-qspi.c + create mode 100644 drivers/spi/spi-spacemit-k1.c + create mode 100644 drivers/spi/spi-spacemit-k1.h + create mode 100644 drivers/tty/serial/spacemit_k1x_uart.c create mode 100644 drivers/usb/dwc3/dwc3-xuantie.c create mode 100644 drivers/watchdog/th1520_wdt.c create mode 100644 include/dt-bindings/clock/sophgo-mango-clock.h create mode 100644 include/dt-bindings/clock/sophgo.h + create mode 100644 include/dt-bindings/clock/spacemit-k1x-clock.h create mode 100644 include/dt-bindings/clock/th1520-audiosys.h create mode 100644 include/dt-bindings/clock/th1520-dspsys.h create mode 100644 include/dt-bindings/clock/th1520-fm-ap-clock.h @@ -2385,8 +2759,13 @@ Signed-off-by: Mingzheng Xing create mode 100644 include/dt-bindings/clock/th1520-visys.h create mode 100644 include/dt-bindings/clock/th1520-vosys.h create mode 100644 include/dt-bindings/clock/th1520-vpsys.h + create mode 100644 
include/dt-bindings/dma/spacemit-k1-dma.h create mode 100644 include/dt-bindings/firmware/xuantie/rsrc.h + create mode 100644 include/dt-bindings/mmc/spacemit-k1-sdhci.h + create mode 100644 include/dt-bindings/pinctrl/k1-x-pinctrl.h + create mode 100644 include/dt-bindings/pinctrl/ur-dp1000-pinctrl.h create mode 100644 include/dt-bindings/reset/sophgo-mango-resets.h + create mode 100644 include/dt-bindings/reset/spacemit-k1x-reset.h create mode 100644 include/dt-bindings/reset/xuantie,th1520-reset.h create mode 100644 include/dt-bindings/soc/th1520_system_status.h create mode 100644 include/dt-bindings/soc/xuantie,th1520-iopmp.h @@ -2394,6 +2773,8 @@ Signed-off-by: Mingzheng Xing create mode 100644 include/linux/firmware/xuantie/th1520_event.h create mode 100644 include/linux/irqchip/riscv-aplic.h create mode 100644 include/linux/irqchip/riscv-imsic.h + create mode 100644 include/linux/mfd/spacemit_p1.h + create mode 100644 include/linux/platform_data/spacemit_k1_sdhci.h create mode 100644 include/linux/th1520_proc_debug.h create mode 100644 include/linux/th1520_rpmsg.h create mode 100644 include/soc/xuantie/th1520_system_monitor.h @@ -2427,7 +2808,361 @@ Signed-off-by: Mingzheng Xing create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/th1520-ddr/metrics.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/th1520-ddr/uncore-ddr-pmu.json + create mode 100644 tools/testing/selftests/riscv/hwprobe/cbo.c + create mode 100644 tools/testing/selftests/riscv/hwprobe/hwprobe.h +diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml +new file mode 100644 +index 000000000000..c0755aecee33 +--- /dev/null ++++ b/.github/workflows/main.yml +@@ -0,0 +1,20 @@ ++name: rvck ci ++ ++on: ++ pull_request_target: ++ types: [opened,reopened,synchronize] ++ issues: ++ types: [opened,reopened] ++ issue_comment: ++ types: [created] ++ ++jobs: ++ rvck-ci: ++ permissions: ++ issues: write ++ pull-requests: write ++ uses: OERV-RVCI/RVCK-RAVA/.github/workflows/rvck-actions.yml@main ++ secrets: ++ LAVA_TOKEN: ${{ secrets.LAVA_TOKEN }} ++ RSYNC_PASSPHRASE: ${{ secrets.RSYNC_PASSPHRASE }} ++ +diff --git a/Documentation/arch/index.rst b/Documentation/arch/index.rst +index 84b80255b851..f4794117e56b 100644 +--- a/Documentation/arch/index.rst ++++ b/Documentation/arch/index.rst +@@ -20,7 +20,7 @@ implementation. 
+ openrisc/index + parisc/index + ../powerpc/index +- ../riscv/index ++ riscv/index + s390/index + sh/index + sparc/index +diff --git a/Documentation/riscv/acpi.rst b/Documentation/arch/riscv/acpi.rst +similarity index 100% +rename from Documentation/riscv/acpi.rst +rename to Documentation/arch/riscv/acpi.rst +diff --git a/Documentation/riscv/boot-image-header.rst b/Documentation/arch/riscv/boot-image-header.rst +similarity index 100% +rename from Documentation/riscv/boot-image-header.rst +rename to Documentation/arch/riscv/boot-image-header.rst +diff --git a/Documentation/riscv/boot.rst b/Documentation/arch/riscv/boot.rst +similarity index 100% +rename from Documentation/riscv/boot.rst +rename to Documentation/arch/riscv/boot.rst +diff --git a/Documentation/riscv/features.rst b/Documentation/arch/riscv/features.rst +similarity index 100% +rename from Documentation/riscv/features.rst +rename to Documentation/arch/riscv/features.rst +diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst +new file mode 100644 +index 000000000000..971370894bfd +--- /dev/null ++++ b/Documentation/arch/riscv/hwprobe.rst +@@ -0,0 +1,271 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++RISC-V Hardware Probing Interface ++--------------------------------- ++ ++The RISC-V hardware probing interface is based around a single syscall, which ++is defined in :: ++ ++ struct riscv_hwprobe { ++ __s64 key; ++ __u64 value; ++ }; ++ ++ long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, cpu_set_t *cpus, ++ unsigned int flags); ++ ++The arguments are split into three groups: an array of key-value pairs, a CPU ++set, and some flags. The key-value pairs are supplied with a count. Userspace ++must prepopulate the key field for each element, and the kernel will fill in the ++value if the key is recognized. If a key is unknown to the kernel, its key field ++will be cleared to -1, and its value set to 0. The CPU set is defined by ++CPU_SET(3) with size ``cpusetsize`` bytes. For value-like keys (eg. vendor, ++arch, impl), the returned value will only be valid if all CPUs in the given set ++have the same value. Otherwise -1 will be returned. For boolean-like keys, the ++value returned will be a logical AND of the values for the specified CPUs. ++Usermode can supply NULL for ``cpus`` and 0 for ``cpusetsize`` as a shortcut for ++all online CPUs. The currently supported flags are: ++ ++* :c:macro:`RISCV_HWPROBE_WHICH_CPUS`: This flag basically reverses the behavior ++ of sys_riscv_hwprobe(). Instead of populating the values of keys for a given ++ set of CPUs, the values of each key are given and the set of CPUs is reduced ++ by sys_riscv_hwprobe() to only those which match each of the key-value pairs. ++ How matching is done depends on the key type. For value-like keys, matching ++ means to be the exact same as the value. For boolean-like keys, matching ++ means the result of a logical AND of the pair's value with the CPU's value is ++ exactly the same as the pair's value. Additionally, when ``cpus`` is an empty ++ set, then it is initialized to all online CPUs which fit within it, i.e. the ++ CPU set returned is the reduction of all the online CPUs which can be ++ represented with a CPU set of size ``cpusetsize``. ++ ++All other flags are reserved for future compatibility and must be zero. ++ ++On success 0 is returned, on failure a negative error code is returned. 
++ ++The following keys are defined: ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MVENDORID`: Contains the value of ``mvendorid``, ++ as defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as ++ defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as ++ defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base ++ user-visible behavior that this kernel supports. The following base user ABIs ++ are defined: ++ ++ * :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: Support for rv32ima or ++ rv64ima, as defined by version 2.2 of the user ISA and version 1.10 of the ++ privileged ISA, with the following known exceptions (more exceptions may be ++ added, but only if it can be demonstrated that the user ABI is not broken): ++ ++ * The ``fence.i`` instruction cannot be directly executed by userspace ++ programs (it may still be executed in userspace via a ++ kernel-controlled mechanism such as the vDSO). ++ ++* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing the extensions ++ that are compatible with the :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: ++ base system behavior. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_FD`: The F and D extensions are supported, as ++ defined by commit cd20cee ("FMIN/FMAX now implement ++ minimumNumber/maximumNumber, not minNum/maxNum") of the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_C`: The C extension is supported, as defined ++ by version 2.2 of the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_V`: The V extension is supported, as defined by ++ version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBA`: The Zba address generation extension is ++ supported, as defined in version 1.0 of the Bit-Manipulation ISA ++ extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBB`: The Zbb extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICBOZ`: The Zicboz extension is supported, as ++ ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBC` The Zbc extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKB` The Zbkb extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKC` The Zbkc extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKX` The Zbkx extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKND` The Zknd extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKNE` The Zkne extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKNH` The Zknh extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKSED` The Zksed extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. 
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKSH` The Zksh extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKT` The Zkt extension is supported, as defined ++ in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVBB`: The Zvbb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVBC`: The Zvbc extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKB`: The Zvkb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKG`: The Zvkg extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNED`: The Zvkned extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHA`: The Zvknha extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHB`: The Zvknhb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKSED`: The Zvksed extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKSH`: The Zvksh extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKT`: The Zvkt extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFH`: The Zfh extension version 1.0 is supported ++ as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFHMIN`: The Zfhmin extension version 1.0 is ++ supported as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTNTL`: The Zihintntl extension version 1.0 ++ is supported as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFH`: The Zvfh extension is supported as ++ defined in the RISC-V Vector manual starting from commit e2ccd0548d6c ++ ("Remove draft warnings from Zvfh[min]"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFHMIN`: The Zvfhmin extension is supported as ++ defined in the RISC-V Vector manual starting from commit e2ccd0548d6c ++ ("Remove draft warnings from Zvfh[min]"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFA`: The Zfa extension is supported as ++ defined in the RISC-V ISA manual starting from commit 056b6ff467c7 ++ ("Zfa is ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZTSO`: The Ztso extension is supported as ++ defined in the RISC-V ISA manual starting from commit 5618fb5a216b ++ ("Ztso is now ratified.") ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZACAS`: The Zacas extension is supported as ++ defined in the Atomic Compare-and-Swap (CAS) instructions manual starting ++ from commit 5059e0ca641c ("update to ratified"). 
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICOND`: The Zicond extension is supported as ++ defined in the RISC-V Integer Conditional (Zicond) operations extension ++ manual starting from commit 95cf1f9 ("Add changes requested by Ved ++ during signoff") ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTPAUSE`: The Zihintpause extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ d8ab5c78c207 ("Zihintpause is ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE32X`: The Vector sub-extension Zve32x is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE32F`: The Vector sub-extension Zve32f is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64X`: The Vector sub-extension Zve64x is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64F`: The Vector sub-extension Zve64f is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64D`: The Vector sub-extension Zve64d is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIMOP`: The Zimop May-Be-Operations extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ 58220614a5f ("Zimop is ratified/1.0"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCA`: The Zca extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCB`: The Zcb extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCD`: The Zcd extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCF`: The Zcf extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCMOP`: The Zcmop May-Be-Operations extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ c732a4f39a4 ("Zcmop is ratified/1.0"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZAWRS`: The Zawrs extension is supported as ++ ratified in commit 98918c844281 ("Merge pull request #1217 from ++ riscv/zawrs") of riscv-isa-manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_SUPM`: The Supm extension is supported as ++ defined in version 1.0 of the RISC-V Pointer Masking extensions. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance ++ information about the selected set of processors. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNKNOWN`: The performance of misaligned ++ accesses is unknown. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_EMULATED`: Misaligned accesses are ++ emulated via software, either in or below the kernel. These accesses are ++ always extremely slow. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower ++ than equivalent byte accesses. 
Misaligned accesses may be supported ++ directly in hardware, or trapped and emulated by software. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster ++ than equivalent byte accesses. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are ++ not supported at all and will generate a misaligned address fault. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE`: An unsigned int which ++ represents the size of the Zicboz block in bytes. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS`: An unsigned long which ++ represent the highest userspace virtual address usable. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of `time CSR`. +diff --git a/Documentation/riscv/index.rst b/Documentation/arch/riscv/index.rst +similarity index 100% +rename from Documentation/riscv/index.rst +rename to Documentation/arch/riscv/index.rst +diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/arch/riscv/patch-acceptance.rst +similarity index 100% +rename from Documentation/riscv/patch-acceptance.rst +rename to Documentation/arch/riscv/patch-acceptance.rst +diff --git a/Documentation/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst +similarity index 100% +rename from Documentation/riscv/uabi.rst +rename to Documentation/arch/riscv/uabi.rst +diff --git a/Documentation/riscv/vector.rst b/Documentation/arch/riscv/vector.rst +similarity index 100% +rename from Documentation/riscv/vector.rst +rename to Documentation/arch/riscv/vector.rst +diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/arch/riscv/vm-layout.rst +similarity index 100% +rename from Documentation/riscv/vm-layout.rst +rename to Documentation/arch/riscv/vm-layout.rst diff --git a/Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml new file mode 100644 index 000000000000..8d36beae9676 @@ -2940,6 +3675,235 @@ index 000000000000..84976f17a4a1 + riscv,group-index-shift = <24>; + }; +... +diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml +index dc1f28e55266..40e3ebeaa582 100644 +--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml ++++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml +@@ -119,6 +119,7 @@ allOf: + enum: + - andestech,nceplic100 + - thead,c900-plic ++ - ultrarisc,dp1000-plic + + then: + properties: +diff --git a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml +new file mode 100644 +index 000000000000..8d330906bbbd +--- /dev/null ++++ b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml +@@ -0,0 +1,58 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/interrupt-controller/thead,c900-aclint-sswi.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: T-HEAD C900 ACLINT Supervisor-level Software Interrupt Device ++ ++maintainers: ++ - Inochi Amaoto ++ ++description: ++ The SSWI device is a part of the THEAD ACLINT device. It provides ++ supervisor-level IPI functionality for a set of HARTs on a THEAD ++ platform. It provides a register to set an IPI (SETSSIP) for each ++ HART connected to the SSWI device. 
++ ++properties: ++ compatible: ++ items: ++ - enum: ++ - sophgo,sg2044-aclint-sswi ++ - const: thead,c900-aclint-sswi ++ ++ reg: ++ maxItems: 1 ++ ++ "#interrupt-cells": ++ const: 0 ++ ++ interrupt-controller: true ++ ++ interrupts-extended: ++ minItems: 1 ++ maxItems: 4095 ++ ++additionalProperties: false ++ ++required: ++ - compatible ++ - reg ++ - "#interrupt-cells" ++ - interrupt-controller ++ - interrupts-extended ++ ++examples: ++ - | ++ interrupt-controller@94000000 { ++ compatible = "sophgo,sg2044-aclint-sswi", "thead,c900-aclint-sswi"; ++ reg = <0x94000000 0x00004000>; ++ #interrupt-cells = <0>; ++ interrupt-controller; ++ interrupts-extended = <&cpu1intc 1>, ++ <&cpu2intc 1>, ++ <&cpu3intc 1>, ++ <&cpu4intc 1>; ++ }; ++... +diff --git a/Documentation/devicetree/bindings/iommu/riscv,iommu.yaml b/Documentation/devicetree/bindings/iommu/riscv,iommu.yaml +new file mode 100644 +index 000000000000..5d015eeb06d0 +--- /dev/null ++++ b/Documentation/devicetree/bindings/iommu/riscv,iommu.yaml +@@ -0,0 +1,147 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/iommu/riscv,iommu.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: RISC-V IOMMU Architecture Implementation ++ ++maintainers: ++ - Tomasz Jeznach ++ ++description: | ++ The RISC-V IOMMU provides memory address translation and isolation for ++ input and output devices, supporting per-device translation context, ++ shared process address spaces including the ATS and PRI components of ++ the PCIe specification, two stage address translation and MSI remapping. ++ It supports identical translation table format to the RISC-V address ++ translation tables with page level access and protection attributes. ++ Hardware uses in-memory command and fault reporting queues with wired ++ interrupt or MSI notifications. ++ ++ Visit https://github.com/riscv-non-isa/riscv-iommu for more details. ++ ++ For information on assigning RISC-V IOMMU to its peripheral devices, ++ see generic IOMMU bindings. ++ ++properties: ++ # For PCIe IOMMU hardware compatible property should contain the vendor ++ # and device ID according to the PCI Bus Binding specification. ++ # Since PCI provides built-in identification methods, compatible is not ++ # actually required. For non-PCIe hardware implementations 'riscv,iommu' ++ # should be specified along with 'reg' property providing MMIO location. ++ compatible: ++ oneOf: ++ - items: ++ - enum: ++ - qemu,riscv-iommu ++ - const: riscv,iommu ++ - items: ++ - enum: ++ - pci1efd,edf1 ++ - const: riscv,pci-iommu ++ ++ reg: ++ maxItems: 1 ++ description: ++ For non-PCI devices this represents base address and size of for the ++ IOMMU memory mapped registers interface. ++ For PCI IOMMU hardware implementation this should represent an address ++ of the IOMMU, as defined in the PCI Bus Binding reference. ++ ++ '#iommu-cells': ++ const: 1 ++ description: ++ The single cell describes the requester id emitted by a master to the ++ IOMMU. ++ ++ interrupts: ++ minItems: 1 ++ maxItems: 4 ++ description: ++ Wired interrupt vectors available for RISC-V IOMMU to notify the ++ RISC-V HARTS. The cause to interrupt vector is software defined ++ using IVEC IOMMU register. 
++ ++ msi-parent: true ++ ++ power-domains: ++ maxItems: 1 ++ ++required: ++ - compatible ++ - reg ++ - '#iommu-cells' ++ ++additionalProperties: false ++ ++examples: ++ - |+ ++ /* Example 1 (IOMMU device with wired interrupts) */ ++ #include ++ ++ iommu1: iommu@1bccd000 { ++ compatible = "qemu,riscv-iommu", "riscv,iommu"; ++ reg = <0x1bccd000 0x1000>; ++ interrupt-parent = <&aplic_smode>; ++ interrupts = <32 IRQ_TYPE_LEVEL_HIGH>, ++ <33 IRQ_TYPE_LEVEL_HIGH>, ++ <34 IRQ_TYPE_LEVEL_HIGH>, ++ <35 IRQ_TYPE_LEVEL_HIGH>; ++ #iommu-cells = <1>; ++ }; ++ ++ /* Device with two IOMMU device IDs, 0 and 7 */ ++ master1 { ++ iommus = <&iommu1 0>, <&iommu1 7>; ++ }; ++ ++ - |+ ++ /* Example 2 (IOMMU device with shared wired interrupt) */ ++ #include ++ ++ iommu2: iommu@1bccd000 { ++ compatible = "qemu,riscv-iommu", "riscv,iommu"; ++ reg = <0x1bccd000 0x1000>; ++ interrupt-parent = <&aplic_smode>; ++ interrupts = <32 IRQ_TYPE_LEVEL_HIGH>; ++ #iommu-cells = <1>; ++ }; ++ ++ - |+ ++ /* Example 3 (IOMMU device with MSIs) */ ++ iommu3: iommu@1bcdd000 { ++ compatible = "qemu,riscv-iommu", "riscv,iommu"; ++ reg = <0x1bccd000 0x1000>; ++ msi-parent = <&imsics_smode>; ++ #iommu-cells = <1>; ++ }; ++ ++ - |+ ++ /* Example 4 (IOMMU PCIe device with MSIs) */ ++ bus { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ pcie@30000000 { ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ reg = <0x0 0x30000000 0x0 0x1000000>; ++ ranges = <0x02000000 0x0 0x41000000 0x0 0x41000000 0x0 0x0f000000>; ++ ++ /* ++ * The IOMMU manages all functions in this PCI domain except ++ * itself. Omit BDF 00:01.0. ++ */ ++ iommu-map = <0x0 &iommu0 0x0 0x8>, ++ <0x9 &iommu0 0x9 0xfff7>; ++ ++ /* The IOMMU programming interface uses slot 00:01.0 */ ++ iommu0: iommu@1,0 { ++ compatible = "pci1efd,edf1", "riscv,pci-iommu"; ++ reg = <0x800 0 0 0 0>; ++ #iommu-cells = <1>; ++ }; ++ }; ++ }; diff --git a/Documentation/devicetree/bindings/mailbox/xuantie-th1520-mailbox.txt b/Documentation/devicetree/bindings/mailbox/xuantie-th1520-mailbox.txt new file mode 100644 index 000000000000..e93195bdb651 @@ -3522,6 +4486,117 @@ index 000000000000..12a23f185577 + }; + }; + }; +diff --git a/Documentation/devicetree/bindings/pinctrl/ultrarisc,dp1000-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ultrarisc,dp1000-pinctrl.yaml +new file mode 100644 +index 000000000000..3b51686d0d1d +--- /dev/null ++++ b/Documentation/devicetree/bindings/pinctrl/ultrarisc,dp1000-pinctrl.yaml +@@ -0,0 +1,105 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/pinctrl/ultrarisc,dp1000-pinctrl.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: UltraRISC DP1000 Pin Controller ++maintainers: ++ - Jia Wang ++ ++description: | ++ UltraRISC RISC-V SoC DP1000 pin controller. ++ contains the pinmux definitions. ++ ++properties: ++ compatible: ++ const: ultrarisc,dp1000-pinctrl ++ ++ reg: ++ maxItems: 1 ++ ++ pinctrl-single,register-width: ++ description: ++ The width of the register used to configure the pinmux. ++ $ref: /schemas/types.yaml#/definitions/uint32 ++ enum: [32] ++ ++ pinctrl-single,function-mask: ++ description: ++ The mask of the functions that can be selected for the pinmux. ++ $ref: /schemas/types.yaml#/definitions/uint32 ++ enum: [0x3ff] ++ ++ pinctrl-use-default: ++ description: ++ If this property is set, the pinmux will be set to the default value. 
++ type: boolean ++ ++patternProperties: ++ '^[a-zA-Z0-9_]+$': ++ type: object ++ additionalProperties: false ++ patternProperties: ++ '-pins$': ++ type: object ++ description: | ++ A pinctrl node should contain at least one subnode representing the ++ pinctrl groups available on the machine. Each subnode will list the ++ pins it needs, and how they should be configured, with regard to ++ muxer configuration, bias etc. ++ properties: ++ pinctrl-pins: ++ description: ++ The list of Pins and their mux settings that properties in the ++ node apply to. The format: `PORT PIN FUNCTION`. ++ minItems: 1 ++ maxItems: 32 ++ items: ++ $ref: /schemas/types.yaml#/definitions/uint32 ++ pinconf-pins: ++ description: ++ The list of Pins and their bias settings that properties in the ++ node apply to. The format: `PORT PIN BIAS`.The BIAS should be ++ set using the UR_DP1000_BIAS macros. ++ minItems: 1 ++ maxItems: 32 ++ items: ++ $ref: /schemas/types.yaml#/definitions/uint32 ++ ++examples: ++ - | ++ pmx0: pinmux@40000000 { ++ compatible = "ultrarisc,dp1000-pinctrl"; ++ reg = <0x0 0x11081000 0x0 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #pinctrl-cells = <2>; ++ pinctrl-single,register-width = <32>; ++ pinctrl-single,function-mask = <0x3ff>; ++ pinctrl-use-default; ++ ++ i2c0_pins: i2c0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 12 UR_FUNC0 ++ UR_DP1000_IOMUX_A 13 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 12 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 13 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ i2c1_pins: i2c1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 6 UR_FUNC0 ++ UR_DP1000_IOMUX_B 7 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ }; diff --git a/Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml b/Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml new file mode 100644 index 000000000000..087b0584887e @@ -3671,6 +4746,21 @@ index fd195c358446..25ba8cf0cc31 100644 + prescaler = <0x8000>; + status = "okay"; +}; +diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml +index 17c553123f96..ba5c8cd476c7 100644 +--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml ++++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml +@@ -48,6 +48,10 @@ properties: + - starfive,jh7100-hsuart + - starfive,jh7100-uart + - const: snps,dw-apb-uart ++ - items: ++ - enum: ++ - ultrarisc,dp1000-uart ++ - const: snps,dw-apb-uart + - const: snps,dw-apb-uart + + reg: diff --git a/Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml b/Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml new file mode 100644 index 000000000000..0448f9897cd4 @@ -4257,6 +5347,19 @@ index 000000000000..f4a63904c3bc + }; + }; \ No newline at end of file +diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml +index 93258265c6b0..c4037bf14937 100644 +--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml ++++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml +@@ -1440,6 +1440,8 @@ patternProperties: + description: Ufi Space Co., Ltd. + "^ugoos,.*": + description: Ugoos Industrial Co., Ltd. ++ "^ultrarisc,.*": ++ description: UltraRISC Technology (Shanghai) Co., Ltd. 
+ "^uniwest,.*": + description: United Western Technologies Corp (UniWest) + "^upisemi,.*": diff --git a/Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml b/Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml new file mode 100644 index 000000000000..23a2bc07210b @@ -4318,6 +5421,136 @@ index 23260ca44946..76597adfb7d5 100644 | s390: | ok | | sh: | TODO | | sparc: | TODO | +diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst +index 6b64072d4bf2..7ad4bfc2cc03 100644 +--- a/Documentation/maintainer/maintainer-entry-profile.rst ++++ b/Documentation/maintainer/maintainer-entry-profile.rst +@@ -101,7 +101,7 @@ to do something different in the near future. + + ../doc-guide/maintainer-profile + ../nvdimm/maintainer-entry-profile +- ../riscv/patch-acceptance ++ ../arch/riscv/patch-acceptance + ../driver-api/media/maintainer-entry-profile + ../driver-api/vfio-pci-device-specific-driver-acceptance + ../nvme/feature-and-quirk-policy +diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst +index b501cd977053..db09a81d474b 100644 +--- a/Documentation/process/index.rst ++++ b/Documentation/process/index.rst +@@ -71,7 +71,7 @@ lack of a better place. + volatile-considered-harmful + botching-up-ioctls + clang-format +- ../riscv/patch-acceptance ++ ../arch/riscv/patch-acceptance + ../core-api/unaligned-memory-access + + .. only:: subproject and html +diff --git a/Documentation/riscv/hwprobe.rst b/Documentation/riscv/hwprobe.rst +deleted file mode 100644 +index a52996b22f75..000000000000 +--- a/Documentation/riscv/hwprobe.rst ++++ /dev/null +@@ -1,98 +0,0 @@ +-.. SPDX-License-Identifier: GPL-2.0 +- +-RISC-V Hardware Probing Interface +---------------------------------- +- +-The RISC-V hardware probing interface is based around a single syscall, which +-is defined in :: +- +- struct riscv_hwprobe { +- __s64 key; +- __u64 value; +- }; +- +- long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, cpu_set_t *cpus, +- unsigned int flags); +- +-The arguments are split into three groups: an array of key-value pairs, a CPU +-set, and some flags. The key-value pairs are supplied with a count. Userspace +-must prepopulate the key field for each element, and the kernel will fill in the +-value if the key is recognized. If a key is unknown to the kernel, its key field +-will be cleared to -1, and its value set to 0. The CPU set is defined by +-CPU_SET(3). For value-like keys (eg. vendor/arch/impl), the returned value will +-be only be valid if all CPUs in the given set have the same value. Otherwise -1 +-will be returned. For boolean-like keys, the value returned will be a logical +-AND of the values for the specified CPUs. Usermode can supply NULL for cpus and +-0 for cpu_count as a shortcut for all online CPUs. There are currently no flags, +-this value must be zero for future compatibility. +- +-On success 0 is returned, on failure a negative error code is returned. +- +-The following keys are defined: +- +-* :c:macro:`RISCV_HWPROBE_KEY_MVENDORID`: Contains the value of ``mvendorid``, +- as defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as +- defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as +- defined by the RISC-V privileged architecture specification. 
+- +-* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base +- user-visible behavior that this kernel supports. The following base user ABIs +- are defined: +- +- * :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: Support for rv32ima or +- rv64ima, as defined by version 2.2 of the user ISA and version 1.10 of the +- privileged ISA, with the following known exceptions (more exceptions may be +- added, but only if it can be demonstrated that the user ABI is not broken): +- +- * The ``fence.i`` instruction cannot be directly executed by userspace +- programs (it may still be executed in userspace via a +- kernel-controlled mechanism such as the vDSO). +- +-* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing the extensions +- that are compatible with the :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: +- base system behavior. +- +- * :c:macro:`RISCV_HWPROBE_IMA_FD`: The F and D extensions are supported, as +- defined by commit cd20cee ("FMIN/FMAX now implement +- minimumNumber/maximumNumber, not minNum/maxNum") of the RISC-V ISA manual. +- +- * :c:macro:`RISCV_HWPROBE_IMA_C`: The C extension is supported, as defined +- by version 2.2 of the RISC-V ISA manual. +- +- * :c:macro:`RISCV_HWPROBE_IMA_V`: The V extension is supported, as defined by +- version 1.0 of the RISC-V Vector extension manual. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBA`: The Zba address generation extension is +- supported, as defined in version 1.0 of the Bit-Manipulation ISA +- extensions. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBB`: The Zbb extension is supported, as defined +- in version 1.0 of the Bit-Manipulation ISA extensions. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined +- in version 1.0 of the Bit-Manipulation ISA extensions. +- +-* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance +- information about the selected set of processors. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNKNOWN`: The performance of misaligned +- accesses is unknown. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_EMULATED`: Misaligned accesses are +- emulated via software, either in or below the kernel. These accesses are +- always extremely slow. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower +- than equivalent byte accesses. Misaligned accesses may be supported +- directly in hardware, or trapped and emulated by software. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster +- than equivalent byte accesses. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are +- not supported at all and will generate a misaligned address fault. diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst index 3170747226f6..43bd8a145b7a 100644 --- a/Documentation/scheduler/index.rst @@ -4375,11 +5608,115 @@ index 000000000000..2387804b1c63 + +The barrier matches a full barrier in the proximity of the membarrier system call +entry, cf. membarrier_{private,global}_expedited(). +diff --git a/Documentation/translations/it_IT/riscv/patch-acceptance.rst b/Documentation/translations/it_IT/riscv/patch-acceptance.rst +index edf67252b3fb..2d7afb1f6959 100644 +--- a/Documentation/translations/it_IT/riscv/patch-acceptance.rst ++++ b/Documentation/translations/it_IT/riscv/patch-acceptance.rst +@@ -1,6 +1,6 @@ + .. 
include:: ../disclaimer-ita.rst + +-:Original: :doc:`../../../riscv/patch-acceptance` ++:Original: :doc:`../../../arch/riscv/patch-acceptance` + :Translator: Federico Vaga + + arch/riscv linee guida alla manutenzione per gli sviluppatori +diff --git a/Documentation/translations/zh_CN/arch/index.rst b/Documentation/translations/zh_CN/arch/index.rst +index e3d273d7d599..c4c2e16f629c 100644 +--- a/Documentation/translations/zh_CN/arch/index.rst ++++ b/Documentation/translations/zh_CN/arch/index.rst +@@ -10,7 +10,7 @@ + + mips/index + arm64/index +- ../riscv/index ++ ../arch/riscv/index + openrisc/index + parisc/index + loongarch/index +diff --git a/Documentation/translations/zh_CN/riscv/boot-image-header.rst b/Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +similarity index 96% +rename from Documentation/translations/zh_CN/riscv/boot-image-header.rst +rename to Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +index 0234c28a7114..779b5172fe24 100644 +--- a/Documentation/translations/zh_CN/riscv/boot-image-header.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +@@ -1,6 +1,6 @@ +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/boot-image-header.rst ++:Original: Documentation/arch/riscv/boot-image-header.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/index.rst b/Documentation/translations/zh_CN/arch/riscv/index.rst +similarity index 79% +rename from Documentation/translations/zh_CN/riscv/index.rst +rename to Documentation/translations/zh_CN/arch/riscv/index.rst +index 131e405aa857..3b041c116169 100644 +--- a/Documentation/translations/zh_CN/riscv/index.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/index.rst +@@ -1,8 +1,8 @@ + .. SPDX-License-Identifier: GPL-2.0 + +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/index.rst ++:Original: Documentation/arch/riscv/index.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst b/Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +similarity index 93% +rename from Documentation/translations/zh_CN/riscv/patch-acceptance.rst +rename to Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +index d180d24717bf..c8eb230ca8ee 100644 +--- a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +@@ -1,8 +1,8 @@ + .. SPDX-License-Identifier: GPL-2.0 + +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/patch-acceptance.rst ++:Original: Documentation/arch/riscv/patch-acceptance.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/vm-layout.rst b/Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +similarity index 98% +rename from Documentation/translations/zh_CN/riscv/vm-layout.rst +rename to Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +index 91884e2dfff8..4b9f4dcf6c19 100644 +--- a/Documentation/translations/zh_CN/riscv/vm-layout.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +@@ -1,7 +1,7 @@ + .. SPDX-License-Identifier: GPL-2.0 +-.. include:: ../disclaimer-zh_CN.rst ++.. 
include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/vm-layout.rst ++:Original: Documentation/arch/riscv/vm-layout.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst +index a1ee99c4786e..0f5acfb1012e 100644 +--- a/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst ++++ b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst +@@ -89,4 +89,4 @@ + + ../doc-guide/maintainer-profile + ../../../nvdimm/maintainer-entry-profile +- ../../../riscv/patch-acceptance ++ ../../../arch/riscv/patch-acceptance diff --git a/MAINTAINERS b/MAINTAINERS -index b22778025fa0..bd37632b3ca5 100644 +index 33eeabab5088..c34609dd468e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS -@@ -13781,7 +13781,9 @@ M: Mathieu Desnoyers +@@ -13822,7 +13822,9 @@ M: Mathieu Desnoyers M: "Paul E. McKenney" L: linux-kernel@vger.kernel.org S: Supported @@ -4389,7 +5726,7 @@ index b22778025fa0..bd37632b3ca5 100644 F: include/uapi/linux/membarrier.h F: kernel/sched/membarrier.c -@@ -18523,6 +18525,20 @@ S: Maintained +@@ -18571,6 +18573,20 @@ S: Maintained F: drivers/mtd/nand/raw/r852.c F: drivers/mtd/nand/raw/r852.h @@ -4410,7 +5747,30 @@ index b22778025fa0..bd37632b3ca5 100644 RISC-V ARCHITECTURE M: Paul Walmsley M: Palmer Dabbelt -@@ -18594,6 +18610,8 @@ M: Fu Wei +@@ -18579,12 +18595,21 @@ L: linux-riscv@lists.infradead.org + S: Supported + Q: https://patchwork.kernel.org/project/linux-riscv/list/ + C: irc://irc.libera.chat/riscv +-P: Documentation/riscv/patch-acceptance.rst ++P: Documentation/arch/riscv/patch-acceptance.rst + T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git + F: arch/riscv/ + N: riscv + K: riscv + ++RISC-V IOMMU ++M: Tomasz Jeznach ++L: iommu@lists.linux.dev ++L: linux-riscv@lists.infradead.org ++S: Maintained ++T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git ++F: Documentation/devicetree/bindings/iommu/riscv,iommu.yaml ++F: drivers/iommu/riscv/ ++ + RISC-V MICROCHIP FPGA SUPPORT + M: Conor Dooley + M: Daire McNamara +@@ -18642,6 +18667,8 @@ M: Fu Wei L: linux-riscv@lists.infradead.org S: Maintained F: arch/riscv/boot/dts/thead/ @@ -4419,6 +5779,18 @@ index b22778025fa0..bd37632b3ca5 100644 RNBD BLOCK DRIVERS M: Md. Haris Iqbal +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 5fb0190e8e9a..4c7454c05fe1 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -1619,7 +1619,6 @@ config ARM64_BOOTPARAM_HOTPLUG_CPU0 + config NUMA + bool "NUMA Memory Allocation and Scheduler Support" + select GENERIC_ARCH_NUMA +- select ACPI_NUMA if ACPI + select OF_NUMA + select HAVE_SETUP_PER_CPU_AREA + select NEED_PER_CPU_EMBED_FIRST_CHUNK diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 2c29239d05c3..846c563689a8 100644 --- a/arch/arm64/include/asm/tlb.h @@ -4435,6 +5807,237 @@ index 2c29239d05c3..846c563689a8 100644 } #endif +diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c +index f872c57e9909..fd9a7bed83ce 100644 +--- a/arch/arm64/kernel/pci.c ++++ b/arch/arm64/kernel/pci.c +@@ -6,28 +6,7 @@ + * Copyright (C) 2014 ARM Ltd. 
+ */ + +-#include +-#include +-#include +-#include +-#include + #include +-#include +-#include +-#include +- +-#ifdef CONFIG_ACPI +-/* +- * Try to assign the IRQ number when probing a new device +- */ +-int pcibios_alloc_irq(struct pci_dev *dev) +-{ +- if (!acpi_disabled) +- acpi_pci_irq_enable(dev); +- +- return 0; +-} +-#endif + + /* + * raw_pci_read/write - Platform-specific PCI config space access. +@@ -61,173 +40,3 @@ int pcibus_to_node(struct pci_bus *bus) + EXPORT_SYMBOL(pcibus_to_node); + + #endif +- +-#ifdef CONFIG_ACPI +- +-struct acpi_pci_generic_root_info { +- struct acpi_pci_root_info common; +- struct pci_config_window *cfg; /* config space mapping */ +-}; +- +-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +-{ +- struct pci_config_window *cfg = bus->sysdata; +- struct acpi_device *adev = to_acpi_device(cfg->parent); +- struct acpi_pci_root *root = acpi_driver_data(adev); +- +- return root->segment; +-} +- +-int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +-{ +- struct pci_config_window *cfg; +- struct acpi_device *adev; +- struct device *bus_dev; +- +- if (acpi_disabled) +- return 0; +- +- cfg = bridge->bus->sysdata; +- +- /* +- * On Hyper-V there is no corresponding ACPI device for a root bridge, +- * therefore ->parent is set as NULL by the driver. And set 'adev' as +- * NULL in this case because there is no proper ACPI device. +- */ +- if (!cfg->parent) +- adev = NULL; +- else +- adev = to_acpi_device(cfg->parent); +- +- bus_dev = &bridge->bus->dev; +- +- ACPI_COMPANION_SET(&bridge->dev, adev); +- set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); +- +- return 0; +-} +- +-static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) +-{ +- struct resource_entry *entry, *tmp; +- int status; +- +- status = acpi_pci_probe_root_resources(ci); +- resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { +- if (!(entry->res->flags & IORESOURCE_WINDOW)) +- resource_list_destroy_entry(entry); +- } +- return status; +-} +- +-/* +- * Lookup the bus range for the domain in MCFG, and set up config space +- * mapping. 
+- */ +-static struct pci_config_window * +-pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +-{ +- struct device *dev = &root->device->dev; +- struct resource *bus_res = &root->secondary; +- u16 seg = root->segment; +- const struct pci_ecam_ops *ecam_ops; +- struct resource cfgres; +- struct acpi_device *adev; +- struct pci_config_window *cfg; +- int ret; +- +- ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); +- if (ret) { +- dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); +- return NULL; +- } +- +- adev = acpi_resource_consumer(&cfgres); +- if (adev) +- dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, +- dev_name(&adev->dev)); +- else +- dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", +- &cfgres); +- +- cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); +- if (IS_ERR(cfg)) { +- dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, +- PTR_ERR(cfg)); +- return NULL; +- } +- +- return cfg; +-} +- +-/* release_info: free resources allocated by init_info */ +-static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) +-{ +- struct acpi_pci_generic_root_info *ri; +- +- ri = container_of(ci, struct acpi_pci_generic_root_info, common); +- pci_ecam_free(ri->cfg); +- kfree(ci->ops); +- kfree(ri); +-} +- +-/* Interface called from ACPI code to setup PCI host controller */ +-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +-{ +- struct acpi_pci_generic_root_info *ri; +- struct pci_bus *bus, *child; +- struct acpi_pci_root_ops *root_ops; +- struct pci_host_bridge *host; +- +- ri = kzalloc(sizeof(*ri), GFP_KERNEL); +- if (!ri) +- return NULL; +- +- root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); +- if (!root_ops) { +- kfree(ri); +- return NULL; +- } +- +- ri->cfg = pci_acpi_setup_ecam_mapping(root); +- if (!ri->cfg) { +- kfree(ri); +- kfree(root_ops); +- return NULL; +- } +- +- root_ops->release_info = pci_acpi_generic_release_info; +- root_ops->prepare_resources = pci_acpi_root_prepare_resources; +- root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; +- bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); +- if (!bus) +- return NULL; +- +- /* If we must preserve the resource configuration, claim now */ +- host = pci_find_host_bridge(bus); +- if (host->preserve_config) +- pci_bus_claim_resources(bus); +- +- /* +- * Assign whatever was left unassigned. If we didn't claim above, +- * this will reassign everything. 
+- */ +- pci_assign_unassigned_root_bus_resources(bus); +- +- list_for_each_entry(child, &bus->children, node) +- pcie_bus_configure_settings(child); +- +- return bus; +-} +- +-void pcibios_add_bus(struct pci_bus *bus) +-{ +- acpi_pci_add_bus(bus); +-} +- +-void pcibios_remove_bus(struct pci_bus *bus) +-{ +- acpi_pci_remove_bus(bus); +-} +- +-#endif +diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig +index 53faa122b0f4..88182df75060 100644 +--- a/arch/ia64/Kconfig ++++ b/arch/ia64/Kconfig +@@ -16,7 +16,6 @@ config IA64 + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO + select ACPI +- select ACPI_NUMA if NUMA + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_SUPPORTS_ACPI +diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig +index 8739e15c137b..4ce4b491edcd 100644 +--- a/arch/loongarch/Kconfig ++++ b/arch/loongarch/Kconfig +@@ -468,7 +468,6 @@ config NR_CPUS + config NUMA + bool "NUMA Support" + select SMP +- select ACPI_NUMA if ACPI + help + Say Y to compile the kernel with NUMA (Non-Uniform Memory Access) + support. This option improves performance on systems with more diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index c9f9895f237d..a7b9c9e73593 100644 --- a/arch/loongarch/include/asm/pgalloc.h @@ -4447,6 +6050,33 @@ index c9f9895f237d..a7b9c9e73593 100644 pud = ptdesc_address(ptdesc); pud_init(pud); +diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c +index 34836408b15a..e24b6f9ea208 100644 +--- a/arch/loongarch/kernel/dma.c ++++ b/arch/loongarch/kernel/dma.c +@@ -17,7 +17,7 @@ EXPORT_SYMBOL_GPL(node_id_offset); + void acpi_arch_dma_setup(struct device *dev) + { + int ret; +- u64 mask, end = 0; ++ u64 mask, end; + const struct bus_dma_region *map = NULL; + + if (node_id_offset == 0) { +@@ -27,12 +27,7 @@ void acpi_arch_dma_setup(struct device *dev) + + ret = acpi_dma_get_range(dev, &map); + if (!ret && map) { +- const struct bus_dma_region *r = map; +- +- for (end = 0; r->size; r++) { +- if (r->dma_start + r->size - 1 > end) +- end = r->dma_start + r->size - 1; +- } ++ end = dma_range_map_max(map); + + mask = DMA_BIT_MASK(ilog2(end) + 1); + dev->bus_dma_limit = end; diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 40e40a7eb94a..f4440edcd8fe 100644 --- a/arch/mips/include/asm/pgalloc.h @@ -4460,11 +6090,25 @@ index 40e40a7eb94a..f4440edcd8fe 100644 pud_init(pud); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig -index 3be10e723b2c..7b9125dd1209 100644 +index 3be10e723b2c..b20dc27077c2 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig -@@ -28,18 +28,22 @@ config RISCV +@@ -13,7 +13,10 @@ config 32BIT + config RISCV + def_bool y + select ACPI_GENERIC_GSI if ACPI ++ select ACPI_PPTT if ACPI ++ select ACPI_MCFG if (ACPI && PCI) + select ACPI_REDUCED_HARDWARE_ONLY if ACPI ++ select ACPI_SPCR_TABLE if ACPI + select ARCH_DMA_DEFAULT_COHERENT + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 +@@ -26,20 +29,25 @@ config RISCV + select ARCH_HAS_FORTIFY_SOURCE + select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE ++ select ARCH_HAS_HW_PTE_YOUNG select ARCH_HAS_KCOV select ARCH_HAS_MEMBARRIER_CALLBACKS + select ARCH_HAS_MEMBARRIER_SYNC_CORE @@ -4482,11 +6126,11 @@ index 3be10e723b2c..7b9125dd1209 100644 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_VDSO_DATA -+ select 
ARCH_KEEP_MEMBLOCK ++ select ARCH_KEEP_MEMBLOCK if ACPI select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_STACKWALK -@@ -64,7 +68,7 @@ config RISCV +@@ -64,7 +72,7 @@ config RISCV select CLINT_TIMER if !MMU select CLONE_BACKWARDS select COMMON_CLK @@ -4495,7 +6139,7 @@ index 3be10e723b2c..7b9125dd1209 100644 select EDAC_SUPPORT select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE) select GENERIC_ARCH_TOPOLOGY -@@ -119,6 +123,7 @@ config RISCV +@@ -119,6 +127,7 @@ config RISCV select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION select HAVE_EBPF_JIT if MMU @@ -4503,7 +6147,7 @@ index 3be10e723b2c..7b9125dd1209 100644 select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ERROR_INJECTION select HAVE_GCC_PLUGINS -@@ -147,6 +152,7 @@ config RISCV +@@ -147,14 +156,18 @@ config RISCV select IRQ_FORCED_THREADING select KASAN_VMALLOC if KASAN select LOCK_MM_AND_FIND_VMA @@ -4511,8 +6155,10 @@ index 3be10e723b2c..7b9125dd1209 100644 select MODULES_USE_ELF_RELA if MODULES select MODULE_SECTIONS if MODULES select OF -@@ -155,6 +161,8 @@ config RISCV + select OF_EARLY_FLATTREE + select OF_IRQ select PCI_DOMAINS_GENERIC if PCI ++ select PCI_ECAM if (ACPI && PCI) select PCI_MSI if PCI select RISCV_ALTERNATIVE if !XIP_KERNEL + select RISCV_APLIC @@ -4520,7 +6166,7 @@ index 3be10e723b2c..7b9125dd1209 100644 select RISCV_INTC select RISCV_TIMER if RISCV_SBI select SIFIVE_PLIC -@@ -223,6 +231,20 @@ config KASAN_SHADOW_OFFSET +@@ -223,6 +236,20 @@ config KASAN_SHADOW_OFFSET default 0xdfffffff00000000 if 64BIT default 0xffffffff if 32BIT @@ -4541,7 +6187,7 @@ index 3be10e723b2c..7b9125dd1209 100644 config ARCH_FLATMEM_ENABLE def_bool !NUMA -@@ -281,6 +303,7 @@ config RISCV_DMA_NONCOHERENT +@@ -281,6 +308,7 @@ config RISCV_DMA_NONCOHERENT select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB @@ -4549,7 +6195,15 @@ index 3be10e723b2c..7b9125dd1209 100644 config RISCV_NONSTANDARD_CACHE_OPS bool -@@ -507,7 +530,7 @@ config RISCV_ISA_V +@@ -298,6 +326,7 @@ config AS_HAS_OPTION_ARCH + + source "arch/riscv/Kconfig.socs" + source "arch/riscv/Kconfig.errata" ++source "arch/riscv/Kconfig.vendor" + + menu "Platform type" + +@@ -507,7 +536,7 @@ config RISCV_ISA_V depends on TOOLCHAIN_HAS_V depends on FPU select DYNAMIC_SIGFRAME @@ -4558,7 +6212,105 @@ index 3be10e723b2c..7b9125dd1209 100644 help Say N here if you want to disable all vector related procedure in the kernel. -@@ -697,6 +720,20 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY +@@ -525,6 +554,53 @@ config RISCV_ISA_V_DEFAULT_ENABLE + + If you don't know what to do here, say Y. + ++config RISCV_ISA_ZAWRS ++ bool "Zawrs extension support for more efficient busy waiting" ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ The Zawrs extension defines instructions to be used in polling loops ++ which allow a hart to enter a low-power state or to trap to the ++ hypervisor while waiting on a store to a memory location. Enable the ++ use of these instructions in the kernel when the Zawrs extension is ++ detected at boot. 
++ ++config TOOLCHAIN_HAS_ZABHA ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha) ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZABHA ++ bool "Zabha extension support for atomic byte/halfword operations" ++ depends on TOOLCHAIN_HAS_ZABHA ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Enable the use of the Zabha ISA-extension to implement kernel ++ byte/halfword atomic memory operations when it is detected at boot. ++ ++ If you don't know what to do here, say Y. ++ ++config TOOLCHAIN_HAS_ZACAS ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zacas) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zacas) ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZACAS ++ bool "Zacas extension support for atomic CAS" ++ depends on TOOLCHAIN_HAS_ZACAS ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Enable the use of the Zacas ISA-extension to implement kernel atomic ++ cmpxchg operations when it is detected at boot. ++ ++ If you don't know what to do here, say Y. ++ + config TOOLCHAIN_HAS_ZBB + bool + default y +@@ -549,6 +625,29 @@ config RISCV_ISA_ZBB + + If you don't know what to do here, say Y. + ++config TOOLCHAIN_HAS_ZBC ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zbc) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zbc) ++ depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900 ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZBC ++ bool "Zbc extension support for carry-less multiplication instructions" ++ depends on TOOLCHAIN_HAS_ZBC ++ depends on MMU ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Adds support to dynamically detect the presence of the Zbc ++ extension (carry-less multiplication) and enable its usage. ++ ++ The Zbc extension could accelerate CRC (cyclic redundancy check) ++ calculations. ++ ++ If you don't know what to do here, say Y. ++ + config RISCV_ISA_ZICBOM + bool "Zicbom extension support for non-coherent DMA operation" + depends on MMU +@@ -579,13 +678,6 @@ config RISCV_ISA_ZICBOZ + + If you don't know what to do here, say Y. + +-config TOOLCHAIN_HAS_ZIHINTPAUSE +- bool +- default y +- depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zihintpause) +- depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause) +- depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600 +- + config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI + def_bool y + # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc +@@ -697,6 +789,20 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY config ARCH_SUPPORTS_CRASH_DUMP def_bool y @@ -4579,8 +6331,26 @@ index 3be10e723b2c..7b9125dd1209 100644 config COMPAT bool "Kernel support for 32-bit U-mode" default 64BIT +@@ -811,6 +917,17 @@ config EFI + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. + ++config DMI ++ bool "Enable support for SMBIOS (DMI) tables" ++ depends on EFI ++ default y ++ help ++ This enables SMBIOS/DMI feature for systems. ++ ++ This option is only useful on systems that have UEFI firmware. ++ However, even with this option, the resultant kernel should ++ continue to boot on existing non-UEFI platforms. 
++ + config CC_HAVE_STACKPROTECTOR_TLS + def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0) + diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs -index 30fd6a512828..81faf077bde7 100644 +index 30fd6a512828..046bbbfb0010 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -22,6 +22,11 @@ config SOC_SIFIVE @@ -4595,7 +6365,7 @@ index 30fd6a512828..81faf077bde7 100644 config ARCH_STARFIVE def_bool SOC_STARFIVE -@@ -49,6 +54,13 @@ config ARCH_THEAD +@@ -49,6 +54,23 @@ config ARCH_THEAD help This enables support for the RISC-V based T-HEAD SoCs. @@ -4605,15 +6375,92 @@ index 30fd6a512828..81faf077bde7 100644 + select ERRATA_THEAD + help + This enables support for the RISC-V based XuanTie SoCs. ++ ++config ARCH_ULTRARISC ++ bool "UltraRISC RISC-V SoCs" ++ depends on MMU && !XIP_KERNEL ++ help ++ This enables support for UltraRISC SoC platform hardware, ++ including boards based on the UR-DP1000. ++ UR-DP1000 is an 8-core 64-bit RISC-V SoC that supports ++ the RV64GCBHX ISA. Supports Hardware Virtualization, ++ RISC-V RV64 ISA H(v1.0) Extension. + config ARCH_VIRT def_bool SOC_VIRT +@@ -111,4 +133,41 @@ config SOC_CANAAN_K210_DTB_SOURCE + + endif # ARCH_CANAAN + ++config SOC_SPACEMIT ++ bool "Spacemit SoCs" ++ select SIFIVE_PLIC ++ help ++ This enables support for Spacemit SoCs platform hardware. ++ ++if SOC_SPACEMIT ++ ++choice ++ prompt "Spacemit SoCs platform" ++ default SOC_SPACEMIT_K1 ++ help ++ choice Spacemit Soc platform ++ ++ config SOC_SPACEMIT_K1 ++ bool "k1" ++ help ++ select Spacemit k1 Platform SoCs. ++endchoice ++ ++if SOC_SPACEMIT_K1 ++ ++choice ++ prompt "Spacemit K1 serial SoCs" ++ default SOC_SPACEMIT_K1X ++ help ++ choice Spacemit K1 SoC platform ++ ++ config SOC_SPACEMIT_K1X ++ bool "k1-x" ++ help ++ This enables support for Spacemit k1-x Platform Hardware. ++endchoice ++ ++endif ++endif ++ + endmenu # "SoC selection" +diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor +new file mode 100644 +index 000000000000..6f1cdd32ed29 +--- /dev/null ++++ b/arch/riscv/Kconfig.vendor +@@ -0,0 +1,19 @@ ++menu "Vendor extensions" ++ ++config RISCV_ISA_VENDOR_EXT ++ bool ++ ++menu "Andes" ++config RISCV_ISA_VENDOR_EXT_ANDES ++ bool "Andes vendor extension support" ++ select RISCV_ISA_VENDOR_EXT ++ default y ++ help ++ Say N here if you want to disable all Andes vendor extension ++ support. This will cause any Andes vendor extensions that are ++ requested by hardware probing to be ignored. ++ ++ If you don't know what to do here, say Y. ++endmenu ++ ++endmenu diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile -index b43a6bb7e4dc..c33a055a06f3 100644 +index b43a6bb7e4dc..30099b367479 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile -@@ -54,22 +54,7 @@ endif +@@ -54,22 +54,13 @@ endif endif endif @@ -4630,14 +6477,19 @@ index b43a6bb7e4dc..c33a055a06f3 100644 -else -riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei -endif -- ++include $(srctree)/arch/riscv/Makefile.isa ++ ++# Check if the toolchain supports Zacas ++riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas + -# Check if the toolchain supports Zihintpause extension -riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause -+include $(srctree)/arch/riscv/Makefile.isa ++# Check if the toolchain supports Zabha ++riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZABHA) := $(riscv-march-y)_zabha # Remove F,D,V from isa string for all. 
Keep extensions between "fd" and "v" by # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) -@@ -152,7 +137,7 @@ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy) +@@ -152,7 +143,7 @@ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy) KBUILD_IMAGE := $(boot)/loader.bin else ifeq ($(CONFIG_EFI_ZBOOT),) @@ -4648,10 +6500,10 @@ index b43a6bb7e4dc..c33a055a06f3 100644 endif diff --git a/arch/riscv/Makefile.isa b/arch/riscv/Makefile.isa new file mode 100644 -index 000000000000..322a83958b96 +index 000000000000..279f24f32763 --- /dev/null +++ b/arch/riscv/Makefile.isa -@@ -0,0 +1,18 @@ +@@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# ISA string setting @@ -4667,21 +6519,21 @@ index 000000000000..322a83958b96 +else +riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei +endif -+ -+# Check if the toolchain supports Zihintpause extension -+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile -index f60a280abb15..72030fd727af 100644 +index f60a280abb15..8b16a3d7d6d5 100644 --- a/arch/riscv/boot/dts/Makefile +++ b/arch/riscv/boot/dts/Makefile -@@ -4,6 +4,7 @@ subdir-y += canaan +@@ -4,7 +4,10 @@ subdir-y += canaan subdir-y += microchip subdir-y += renesas subdir-y += sifive +subdir-y += sophgo ++subdir-y += spacemit subdir-y += starfive subdir-y += thead ++subdir-y += ultrarisc + obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y)) diff --git a/arch/riscv/boot/dts/sophgo/Makefile b/arch/riscv/boot/dts/sophgo/Makefile new file mode 100644 index 000000000000..6e7c7763b0a9 @@ -12769,6 +14621,2893 @@ index 000000000000..57f304fc778f + stdout-path = "serial0"; + }; +}; +diff --git a/arch/riscv/boot/dts/spacemit/Makefile b/arch/riscv/boot/dts/spacemit/Makefile +new file mode 100644 +index 000000000000..bc18f5f5cec9 +--- /dev/null ++++ b/arch/riscv/boot/dts/spacemit/Makefile +@@ -0,0 +1,2 @@ ++dtb-$(CONFIG_SOC_SPACEMIT_K1X) += k1-bananapi-f3.dtb ++obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) +diff --git a/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts +new file mode 100644 +index 000000000000..16f7a19f701f +--- /dev/null ++++ b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts +@@ -0,0 +1,448 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* Copyright (c) 2023 Spacemit, Inc */ ++ ++/dts-v1/; ++ ++#include "k1-x.dtsi" ++#include "k1-x_pinctrl.dtsi" ++ ++/ { ++ model = "Banana Pi BPI-F3"; ++ ++ memory@0 { ++ device_type = "memory"; ++ reg = <0x0 0x00000000 0x0 0x80000000>; ++ }; ++ ++ memory@100000000 { ++ device_type = "memory"; ++ reg = <0x1 0x00000000 0x0 0x80000000>; ++ }; ++ ++ chosen { ++ bootargs = "earlycon=sbi console=ttySP0,115200n8 loglevel=8 rdinit=/init"; ++ stdout-path = "serial0:115200n8"; ++ }; ++}; ++ ++&pinctrl { ++ pinctrl-single,gpio-range = < ++ &range GPIO_49 2 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ &range GPIO_58 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_63 2 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_65 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_67 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ &range PRI_TDI 2 (MUX_MODE1 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range PRI_TCK 1 (MUX_MODE1 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range PRI_TDO 1 (MUX_MODE1 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_74 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | 
PAD_1V8_DS2) ++ &range GPIO_80 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ &range GPIO_81 3 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_90 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_91 2 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range DVL0 1 (MUX_MODE1 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range DVL1 1 (MUX_MODE1 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS0) ++ &range GPIO_110 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_111 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_113 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_114 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_115 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_116 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_118 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_123 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS0) ++ &range GPIO_124 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_125 3 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ >; ++}; ++ ++&gpio{ ++ gpio-ranges = < ++ &pinctrl 49 GPIO_49 2 ++ &pinctrl 58 GPIO_58 1 ++ &pinctrl 63 GPIO_63 3 ++ &pinctrl 67 GPIO_67 1 ++ &pinctrl 70 PRI_TDI 4 ++ &pinctrl 74 GPIO_74 1 ++ &pinctrl 80 GPIO_80 4 ++ &pinctrl 90 GPIO_90 3 ++ &pinctrl 96 DVL0 2 ++ &pinctrl 110 GPIO_110 1 ++ &pinctrl 111 GPIO_111 1 ++ &pinctrl 113 GPIO_113 1 ++ &pinctrl 114 GPIO_114 3 ++ &pinctrl 118 GPIO_118 1 ++ &pinctrl 123 GPIO_123 5 ++ >; ++}; ++ ++&uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_uart0_2>; ++ status = "okay"; ++}; ++ ++&spi3 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ssp3_0>; ++ status = "okay"; ++}; ++ ++&qspi { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_qspi>; ++ status = "okay"; ++ ++ flash@0 { ++ compatible = "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = <26500000>; ++ m25p,fast-read; ++ broken-flash-reset; ++ status = "okay"; ++ }; ++}; ++ ++&i2c2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c2_0>; ++ spacemit,i2c-fast-mode; ++ status = "okay"; ++}; ++ ++&i2c8 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c8>; ++ status = "okay"; ++ ++ spmp1@41 { ++ compatible = "spacemit,p1"; ++ reg = <0x41>; ++ interrupt-parent = <&intc>; ++ interrupts = <64>; ++ status = "okay"; ++ ++ regulators { ++ compatible = "spacemit,p1,regulator"; ++ ++ dcdc_1: DCDC_REG1 { ++ regulator-name = "dcdc1"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <650000>; ++ }; ++ }; ++ ++ dcdc_2: DCDC_REG2 { ++ regulator-name = "dcdc2"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ dcdc_3: DCDC_REG3 { ++ regulator-name = "dcdc3"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <1800000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ dcdc_4: DCDC_REG4 { ++ regulator-name = "dcdc4"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3300000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <3300000>; ++ }; ++ }; ++ ++ dcdc_5: DCDC_REG5 { ++ regulator-name = "dcdc5"; ++ regulator-min-microvolt = <500000>; ++ 
regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ dcdc_6: DCDC_REG6 { ++ regulator-name = "dcdc6"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ ldo_1: LDO_REG1 { ++ regulator-name = "ldo1"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-boot-on; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_2: LDO_REG2 { ++ regulator-name = "ldo2"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_3: LDO_REG3 { ++ regulator-name = "ldo3"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_4: LDO_REG4 { ++ regulator-name = "ldo4"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_5: LDO_REG5 { ++ regulator-name = "ldo5"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-boot-on; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_6: LDO_REG6 { ++ regulator-name = "ldo6"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_7: LDO_REG7 { ++ regulator-name = "ldo7"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_8: LDO_REG8 { ++ regulator-name = "ldo8"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-always-on; ++ }; ++ ++ ldo_9: LDO_REG9 { ++ regulator-name = "ldo9"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ }; ++ ++ ldo_10: LDO_REG10 { ++ regulator-name = "ldo10"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-always-on; ++ }; ++ ++ ldo_11: LDO_REG11 { ++ regulator-name = "ldo11"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ }; ++ ++ sw_1: SWITCH_REG1 { ++ regulator-name = "switch1"; ++ }; ++ }; ++ ++ pmic_pinctrl: pinctrl { ++ compatible = "spacemit,p1,pinctrl"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ spm_pmic,npins = <6>; ++ }; ++ ++ pwr_key: key { ++ compatible = "spacemit,p1,pwrkey"; ++ }; ++ ++ ext_rtc: rtc { ++ compatible = "spacemit,p1,rtc"; ++ }; ++ ++ ext_adc: adc { ++ compatible = "spacemit,p1,adc"; ++ }; ++ }; ++}; ++ ++&pwm7 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_pwm7_0>; ++ status = "okay"; ++}; ++ ++/* SDCard */ ++&sdhci0 { ++ pinctrl-names = "default","fast"; ++ pinctrl-0 = <&pinctrl_mmc1>; ++ pinctrl-1 = <&pinctrl_mmc1_fast>; ++ bus-width = <4>; ++ cd-gpios = <&gpio 80 0>; ++ cd-inverted; ++ vmmc-supply = <&dcdc_4>; ++ vqmmc-supply = <&ldo_1>; ++ no-mmc; ++ no-sdio; ++ spacemit,sdh-host-caps-disable = <(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25)>; ++ spacemit,sdh-quirks = 
<(SDHCI_QUIRK_BROKEN_CARD_DETECTION | ++ SDHCI_QUIRK_INVERTED_WRITE_PROTECT | ++ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)>; ++ spacemit,sdh-quirks2 = <(SDHCI_QUIRK2_PRESET_VALUE_BROKEN | ++ SDHCI_QUIRK2_BROKEN_PHY_MODULE | ++ SDHCI_QUIRK2_SET_AIB_MMC)>; ++ spacemit,aib_mmc1_io_reg = <0xD401E81C>; ++ spacemit,apbc_asfar_reg = <0xD4015050>; ++ spacemit,apbc_assar_reg = <0xD4015054>; ++ spacemit,rx_dline_reg = <0x0>; ++ spacemit,tx_dline_reg = <0x0>; ++ spacemit,tx_delaycode = <0x5f>; ++ spacemit,rx_tuning_limit = <50>; ++ spacemit,sdh-freq = <204800000>; ++ status = "okay"; ++}; ++ ++/* SDIO */ ++&sdhci1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_mmc2>; ++ bus-width = <4>; ++ non-removable; ++ vqmmc-supply = <&dcdc_3>; ++ no-mmc; ++ no-sd; ++ spacemit,sdh-host-caps-disable = <(MMC_CAP_UHS_DDR50 | MMC_CAP_NEEDS_POLL)>; ++ spacemit,sdh-quirks = <(SDHCI_QUIRK_BROKEN_CARD_DETECTION | ++ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)>; ++ spacemit,sdh-quirks2 = <(SDHCI_QUIRK2_PRESET_VALUE_BROKEN | ++ SDHCI_QUIRK2_BROKEN_PHY_MODULE)>; ++ spacemit,rx_dline_reg = <0x0>; ++ spacemit,tx_delaycode = <0x8f 0x5f>; ++ spacemit,rx_tuning_limit = <50>; ++ spacemit,sdh-freq = <375000000>; ++ status = "okay"; ++}; ++ ++/* eMMC */ ++&sdhci2 { ++ bus-width = <8>; ++ non-removable; ++ mmc-hs400-1_8v; ++ mmc-hs400-enhanced-strobe; ++ no-sd; ++ no-sdio; ++ spacemit,sdh-quirks = <(SDHCI_QUIRK_BROKEN_CARD_DETECTION | ++ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)>; ++ spacemit,sdh-quirks2 = ; ++ spacemit,sdh-freq = <375000000>; ++ status = "okay"; ++}; ++ ++ð0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_gmac0>; ++ ++ emac,reset-gpio = <&gpio 110 0>; ++ emac,reset-active-low; ++ emac,reset-delays-us = <0 10000 100000>; ++ ++ /* store forward mode */ ++ tx-threshold = <1518>; ++ rx-threshold = <12>; ++ tx-ring-num = <1024>; ++ rx-ring-num = <1024>; ++ dma-burst-len = <5>; ++ ++ ref-clock-from-phy; ++ clk-tuning-enable; ++ clk-tuning-by-delayline; ++ tx-phase = <60>; ++ rx-phase = <73>; ++ phy-handle = <&rgmii0>; ++ status = "okay"; ++ ++ mdio-bus { ++ #address-cells = <0x1>; ++ #size-cells = <0x0>; ++ rgmii0: phy@0 { ++ compatible = "ethernet-phy-id001c.c916"; ++ device_type = "ethernet-phy"; ++ reg = <0x1>; ++ phy-mode = "rgmii"; ++ }; ++ }; ++}; ++ ++ð1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_gmac1>; ++ ++ emac,reset-gpio = <&gpio 115 0>; ++ emac,reset-active-low; ++ emac,reset-delays-us = <0 10000 100000>; ++ ++ /* store forward mode */ ++ tx-threshold = <1518>; ++ rx-threshold = <12>; ++ tx-ring-num = <1024>; ++ rx-ring-num = <1024>; ++ dma-burst-len = <5>; ++ ++ ref-clock-from-phy; ++ clk-tuning-enable; ++ clk-tuning-by-delayline; ++ tx-phase = <90>; ++ rx-phase = <73>; ++ phy-handle = <&rgmii1>; ++ status = "okay"; ++ ++ mdio-bus { ++ #address-cells = <0x1>; ++ #size-cells = <0x0>; ++ rgmii1: phy@1 { ++ compatible = "ethernet-phy-id001c.c916"; ++ device_type = "ethernet-phy"; ++ reg = <0x1>; ++ phy-mode = "rgmii"; ++ }; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/spacemit/k1-x.dtsi b/arch/riscv/boot/dts/spacemit/k1-x.dtsi +new file mode 100644 +index 000000000000..3c7e2ad81529 +--- /dev/null ++++ b/arch/riscv/boot/dts/spacemit/k1-x.dtsi +@@ -0,0 +1,1221 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* Copyright (c) 2022 Spacemit, Inc */ ++ ++/dts-v1/; ++ ++#include ++#include ++#include ++#include ++ ++/ { ++ compatible = "spacemit,k1-x"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart2; ++ serial2 = &uart3; ++ serial3 = &uart4; ++ serial4 = 
&uart5; ++ serial5 = &uart6; ++ serial6 = &uart7; ++ serial7 = &uart8; ++ serial8 = &uart9; ++ mmc0 = &sdhci0; ++ mmc1 = &sdhci1; ++ mmc2 = &sdhci2; ++ ethernet0 = ð0; ++ ethernet1 = ð1; ++ }; ++ ++ cpus: cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ timebase-frequency = <24000000>; ++ cpu_0: cpu@0 { ++ compatible = "spacemit,x60", "riscv"; ++ device_type = "cpu"; ++ model = "Spacemit(R) X60"; ++ reg = <0>; ++ status = "okay"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst0_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ cpu-ai = "true"; ++ ++ cpu0_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_1: cpu@1 { ++ device_type = "cpu"; ++ reg = <1>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst0_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ cpu-ai = "true"; ++ ++ cpu1_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_2: cpu@2 { ++ device_type = "cpu"; ++ reg = <2>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst0_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ cpu-ai = "true"; ++ ++ cpu2_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_3: cpu@3 { ++ device_type = "cpu"; ++ reg = <3>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", 
"zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst0_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ cpu-ai = "true"; ++ ++ cpu3_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_4: cpu@4 { ++ device_type = "cpu"; ++ reg = <4>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu4_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_5: cpu@5 { ++ device_type = "cpu"; ++ reg = <5>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu5_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_6: cpu@6 { ++ device_type = "cpu"; ++ reg = <6>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu6_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_7: cpu@7 { ++ device_type = "cpu"; ++ reg = <7>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ 
riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu7_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu-map { ++ cluster0 { ++ core0 { ++ cpu = <&cpu_0>; ++ }; ++ ++ core1 { ++ cpu = <&cpu_1>; ++ }; ++ ++ core2 { ++ cpu = <&cpu_2>; ++ }; ++ ++ core3 { ++ cpu = <&cpu_3>; ++ }; ++ }; ++ ++ cluster1 { ++ core0 { ++ cpu = <&cpu_4>; ++ }; ++ ++ core1 { ++ cpu = <&cpu_5>; ++ }; ++ ++ core2 { ++ cpu = <&cpu_6>; ++ }; ++ ++ core3 { ++ cpu = <&cpu_7>; ++ }; ++ }; ++ }; ++ ++ clst0_l2_cache: l2-cache0 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <524288>; ++ cache-sets = <512>; ++ cache-unified; ++ }; ++ ++ clst1_l2_cache: l2-cache1 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <524288>; ++ cache-sets = <512>; ++ cache-unified; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <0x2>; ++ #size-cells = <0x2>; ++ ranges; ++ ++ vctcxo_24: clock-vctcxo_24 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <24000000>; ++ clock-output-names = "vctcxo_24"; ++ }; ++ ++ vctcxo_3: clock-vctcxo_3 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <3000000>; ++ clock-output-names = "vctcxo_3"; ++ }; ++ ++ vctcxo_1: clock-vctcxo_1 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <1000000>; ++ clock-output-names = "vctcxo_1"; ++ }; ++ ++ pll1_2457p6_vco: clock-pll1_2457p6_vco { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <2457600000>; ++ clock-output-names = "pll1_2457p6_vco"; ++ }; ++ ++ clk_32k: clock-clk32k { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <32000>; ++ clock-output-names = "clk_32k"; ++ }; ++ ++ pll_clk_cluster0: clock-pll_clk_cluster0 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <10000000>; ++ clock-output-names = "pll_clk_cluster0"; ++ }; ++ ++ pll_clk_cluster1: clock-pll_clk_cluster1 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <10000000>; ++ clock-output-names = "pll_clk_cluster1"; ++ }; ++ }; ++ ++ soc: soc { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-noncoherent; ++ ranges; ++ ++ /* ++ * dram mapping for dma/usb/sdh for ex, ++ * only 2GB available for devices. ++ */ ++ dram_range0: dram_range@0 { ++ compatible = "spacemit-dram-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-ranges = <0x0 0x00000000 0x0 0x00000000 0x0 0x80000000>; ++ #interconnect-cells = <0>; ++ status = "okay"; ++ }; ++ ++ /* ++ * dram mapping for vpu/gpu/dpu/v2d/isp/csi/vi/cpp, ++ * and eth/crypto/jpu for ex. 
++ * 4GB space is available for devices, and the mapping is: ++ * 0~2GB of device's address space is mapping to 0~2GB of cpu's ++ * 2~4GB of device's address space is mapping to 4~6GB of cpu's ++ */ ++ dram_range1: dram_range@1 { ++ compatible = "spacemit-dram-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-ranges = <0x0 0x00000000 0x0 0x00000000 0x0 0x80000000>, ++ <0x0 0x80000000 0x1 0x00000000 0x0 0x80000000>; ++ #interconnect-cells = <0>; ++ status = "okay"; ++ }; ++ ++ /* ++ * dram mapping for pcie0/pcie1/pcie2 for ex. ++ * 14GB space is available for devices, and the mapping is: ++ * 0~2GB of device's address space is mapping to 0~2GB of cpu's ++ * 4~16GB of device's address space is mapping to 6~16GB of cpu's ++ * the 2~4GB of devcie's address space is io area ++ */ ++ dram_range2: dram_range@2 { ++ compatible = "spacemit-dram-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-ranges = <0x0 0x00000000 0x0 0x00000000 0x0 0x80000000>, ++ <0x1 0x00000000 0x1 0x80000000 0x3 0x00000000>; ++ #interconnect-cells = <0>; ++ status = "okay"; ++ }; ++ ++ clint0: clint@e4000000 { ++ compatible = "riscv,clint0"; ++ interrupts-extended = < ++ &cpu0_intc 3 &cpu0_intc 7 ++ &cpu1_intc 3 &cpu1_intc 7 ++ &cpu2_intc 3 &cpu2_intc 7 ++ &cpu3_intc 3 &cpu3_intc 7 ++ &cpu4_intc 3 &cpu4_intc 7 ++ &cpu5_intc 3 &cpu5_intc 7 ++ &cpu6_intc 3 &cpu6_intc 7 ++ &cpu7_intc 3 &cpu7_intc 7 ++ >; ++ reg = <0x0 0xE4000000 0x0 0x00010000>; ++ }; ++ ++ ccu: clock-controller@d4050000 { ++ compatible = "spacemit,k1x-clock"; ++ reg = <0x0 0xd4050000 0x0 0x209c>, ++ <0x0 0xd4282800 0x0 0x400>, ++ <0x0 0xd4015000 0x0 0x1000>, ++ <0x0 0xd4090000 0x0 0x1000>, ++ <0x0 0xd4282c00 0x0 0x400>, ++ <0x0 0xd8440000 0x0 0x98>, ++ <0x0 0xc0000000 0x0 0x4280>, ++ <0x0 0xf0610000 0x0 0x20>, ++ <0x0 0xc0880000 0x0 0x2050>, ++ <0x0 0xc0888000 0x0 0x30>; ++ reg-names = "mpmu", "apmu", "apbc", "apbs", "ciu", ++ "dciu", "ddrc", "apbc2", "rcpu", "rcpu2"; ++ clocks = <&vctcxo_24>, <&vctcxo_3>, <&vctcxo_1>, ++ <&pll1_2457p6_vco>, <&clk_32k>; ++ clock-names = "vctcxo_24", "vctcxo_3", "vctcxo_1", ++ "pll1_2457p6_vco", ++ "clk_32k"; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ reset: reset-controller@d4050000 { ++ compatible = "spacemit,k1x-reset"; ++ reg = <0x0 0xd4050000 0x0 0x209c>, ++ <0x0 0xd4282800 0x0 0x400>, ++ <0x0 0xd4015000 0x0 0x1000>, ++ <0x0 0xd4090000 0x0 0x1000>, ++ <0x0 0xd4282c00 0x0 0x400>, ++ <0x0 0xd8440000 0x0 0x98>, ++ <0x0 0xc0000000 0x0 0x4280>, ++ <0x0 0xf0610000 0x0 0x20>, ++ <0x0 0xc0880000 0x0 0x2050>, ++ <0x0 0xc0888000 0x0 0x30>; ++ reg-names = "mpmu", "apmu", "apbc", "apbs", "ciu", ++ "dciu", "ddrc", "apbc2", "rcpu", "rcpu2"; ++ #reset-cells = <1>; ++ status = "okay"; ++ }; ++ ++ intc: interrupt-controller@e0000000 { ++ #interrupt-cells = <1>; ++ compatible = "riscv,plic0"; ++ interrupt-controller; ++ interrupts-extended = < ++ &cpu0_intc 11 &cpu0_intc 9 ++ &cpu1_intc 11 &cpu1_intc 9 ++ &cpu2_intc 11 &cpu2_intc 9 ++ &cpu3_intc 11 &cpu3_intc 9 ++ &cpu4_intc 11 &cpu4_intc 9 ++ &cpu5_intc 11 &cpu5_intc 9 ++ &cpu6_intc 11 &cpu6_intc 9 ++ &cpu7_intc 11 &cpu7_intc 9 ++ >; ++ reg = <0x0 0xE0000000 0x0 0x04000000>; ++ reg-names = "control"; ++ riscv,max-priority = <7>; ++ riscv,ndev = <159>; ++ }; ++ ++ pinctrl: pinctrl@d401e000 { ++ compatible = "pinctrl-spacemit-k1x"; ++ reg = <0x0 0xd401e000 0x0 0x250>, ++ <0x0 0xd4019800 0x0 0x10>, ++ <0x0 0xd4019000 0x0 0x800>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ #pinctrl-cells = <2>; ++ #gpio-range-cells = <3>; ++ ++ 
pinctrl-single,register-width = <32>; ++ pinctrl-single,function-mask = <0xff77>; ++ ++ clocks = <&ccu CLK_AIB>; ++ clock-names = "clk_aib"; ++ resets = <&reset RESET_AIB>; ++ reset-names = "aib_rst"; ++ ++ interrupt-parent = <&intc>; ++ interrupts = <60>; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ ++ range: gpio-range { ++ #pinctrl-single,gpio-range-cells = <3>; ++ }; ++ }; ++ ++ pdma0: pdma@d4000000 { ++ compatible = "spacemit,k1-pdma"; ++ reg = <0x0 0xd4000000 0x0 0x4000>; ++ interrupts = <72>; ++ interrupt-parent = <&intc>; ++ clocks = <&ccu CLK_DMA>; ++ resets = <&reset RESET_DMA>; ++ #dma-cells= <2>; ++ #dma-channels = <16>; ++ max-burst-size = <64>; ++ reserved-channels = <15 45>; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "ok"; ++ }; ++ ++ uart0: serial@d4017000 { ++ compatible = "spacemit,k1x-uart"; ++ reg = <0x0 0xd4017000 0x0 0x100>; ++ interrupt-parent = <&intc>; ++ interrupts = <42>; ++ clocks = <&ccu CLK_UART1>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART1>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart2: uart@d4017100 { ++ compatible = "spacemit,k1x-uart"; ++ reg = <0x0 0xd4017100 0x0 0x100>; ++ interrupt-parent = <&intc>; ++ interrupts = <44>; ++ clocks = <&ccu CLK_UART2>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART2>; ++ status = "disabled"; ++ }; ++ ++ uart3: uart@d4017200 { ++ compatible = "spacemit,k1x-uart"; ++ reg = <0x0 0xd4017200 0x0 0x100>; ++ interrupt-parent = <&intc>; ++ interrupts = <45>; ++ clocks = <&ccu CLK_UART3>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART3>; ++ status = "disabled"; ++ }; ++ ++ uart4: uart@d4017300 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017300 0x0 0x100>; ++ interrupts = <46>; ++ clocks = <&ccu CLK_UART4>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART4>; ++ status = "disabled"; ++ }; ++ ++ uart5: uart@d4017400 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017400 0x0 0x100>; ++ interrupts = <47>; ++ clocks = <&ccu CLK_UART5>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART5>; ++ status = "disabled"; ++ }; ++ ++ uart6: uart@d4017500 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017500 0x0 0x100>; ++ interrupts = <48>; ++ clocks = <&ccu CLK_UART6>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART6>; ++ status = "disabled"; ++ }; ++ ++ uart7: uart@d4017600 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017600 0x0 0x100>; ++ interrupts = <49>; ++ clocks = <&ccu CLK_UART7>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART7>; ++ status = "disabled"; ++ }; ++ ++ uart8: uart@d4017700 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017700 0x0 0x100>; ++ interrupts = <50>; ++ clocks = <&ccu CLK_UART8>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART8>; ++ status = "disabled"; ++ }; ++ ++ uart9: uart@d4017800 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017800 0x0 0x100>; ++ interrupts = <51>; ++ clocks = <&ccu CLK_UART9>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset 
RESET_UART9>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@d4010800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <0>; ++ reg = <0x0 0xd4010800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <36>; ++ clocks = <&ccu CLK_TWSI0>; ++ resets = <&reset RESET_TWSI0>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@d4011000 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <1>; ++ reg = <0x0 0xd4011000 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <37>; ++ clocks = <&ccu CLK_TWSI1>; ++ resets = <&reset RESET_TWSI1>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c@d4012000 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <2>; ++ reg = <0x0 0xd4012000 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <38>; ++ clocks = <&ccu CLK_TWSI2>; ++ resets = <&reset RESET_TWSI2>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c4: i2c@d4012800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <4>; ++ reg = <0x0 0xd4012800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <40>; ++ clocks = <&ccu CLK_TWSI4>; ++ resets = <&reset RESET_TWSI4>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c5: i2c@d4013800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <5>; ++ reg = <0x0 0xd4013800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <41>; ++ clocks = <&ccu CLK_TWSI5>; ++ resets = <&reset RESET_TWSI5>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c6: i2c@d4018800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <6>; ++ reg = <0x0 0xd4018800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <70>; ++ clocks = <&ccu CLK_TWSI6>; ++ resets = <&reset RESET_TWSI6>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c7: i2c@d401d000 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <7>; ++ reg = <0x0 0xd401d000 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <18>; ++ 
clocks = <&ccu CLK_TWSI7>; ++ resets = <&reset RESET_TWSI7>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c8: i2c@d401d800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <8>; ++ reg = <0x0 0xd401d800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <19>; ++ clocks = <&ccu CLK_TWSI8>; ++ resets = <&reset RESET_TWSI8>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ spi0: spi@d4026000 { ++ compatible = "spacemit,k1-spi"; ++ reg = <0x0 0xd4026000 0x0 0x30>; ++ k1,spi-id = <0>; ++ k1,spi-clock-rate = <26000000>; ++ dmas = <&pdma0 DMA_SSPA0_RX 1 ++ &pdma0 DMA_SSPA0_TX 1>; ++ dma-names = "rx", "tx"; ++ interrupt-parent = <&intc>; ++ interrupts = <56>; ++ clocks = <&ccu CLK_SSPA0>; ++ resets = <&reset RESET_SSPA0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ spi1: spi@d4026800 { ++ compatible = "spacemit,k1-spi"; ++ reg = <0x0 0xd4026800 0x0 0x30>; ++ k1,spi-id = <1>; ++ k1,spi-clock-rate = <26000000>; ++ dmas = <&pdma0 DMA_SSPA1_RX 1 ++ &pdma0 DMA_SSPA1_TX 1>; ++ dma-names = "rx", "tx"; ++ interrupt-parent = <&intc>; ++ interrupts = <57>; ++ clocks = <&ccu CLK_SSPA1>; ++ resets = <&reset RESET_SSPA1>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ spi3: spi@d401c000 { ++ compatible = "spacemit,k1-spi"; ++ reg = <0x0 0xd401c000 0x0 0x30>; ++ k1,spi-id = <3>; ++ k1,spi-clock-rate = <26000000>; ++ dmas = <&pdma0 DMA_SSP3_RX 1 ++ &pdma0 DMA_SSP3_TX 1>; ++ dma-names = "rx", "tx"; ++ interrupt-parent = <&intc>; ++ interrupts = <55>; ++ clocks = <&ccu CLK_SSP3>; ++ resets = <&reset RESET_SSP3>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@d420c000 { ++ compatible = "spacemit,k1-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0xd420c000 0x0 0x1000>, ++ <0x0 0xb8000000 0x0 0xc00000>; ++ reg-names = "qspi-base", "qspi-mmap"; ++ k1,qspi-sfa1ad = <0x4000000>; ++ k1,qspi-sfa2ad = <0x100000>; ++ k1,qspi-sfb1ad = <0x100000>; ++ k1,qspi-sfb2ad = <0x100000>; ++ clocks = <&ccu CLK_QSPI>, ++ <&ccu CLK_QSPI_BUS>; ++ clock-names = "qspi_clk", "qspi_bus_clk"; ++ resets = <&reset RESET_QSPI>, ++ <&reset RESET_QSPI_BUS>; ++ reset-names = "qspi_reset", "qspi_bus_reset"; ++ k1,qspi-pmuap-reg = <0xd4282860>; ++ k1,qspi-mpmu-acgr-reg = <0xd4051024>; ++ k1,qspi-freq = <26500000>; ++ k1,qspi-id = <4>; ++ interrupts = <117>; ++ interrupt-parent = <&intc>; ++ k1,qspi-tx-dma = <1>; ++ k1,qspi-rx-dma = <1>; ++ dmas = <&pdma0 DMA_QSPI_TX 1>; ++ dma-names = "tx-dma"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ pwm0: pwm@d401a000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401a000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM0>; ++ resets = <&reset RESET_PWM0>; ++ status = "disabled"; ++ }; ++ ++ pwm1: 
pwm@d401a400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401a400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM1>; ++ resets = <&reset RESET_PWM1>; ++ status = "disabled"; ++ }; ++ ++ pwm2: pwm@d401a800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401a800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM2>; ++ resets = <&reset RESET_PWM2>; ++ status = "disabled"; ++ }; ++ ++ pwm3: pwm@d401ac00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401ac00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM3>; ++ resets = <&reset RESET_PWM3>; ++ status = "disabled"; ++ }; ++ ++ pwm4: pwm@d401b000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401b000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM4>; ++ resets = <&reset RESET_PWM4>; ++ status = "disabled"; ++ }; ++ ++ pwm5: pwm@d401b400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401b400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM5>; ++ resets = <&reset RESET_PWM5>; ++ status = "disabled"; ++ }; ++ ++ pwm6: pwm@d401b800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401b800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM6>; ++ resets = <&reset RESET_PWM6>; ++ status = "disabled"; ++ }; ++ ++ pwm7: pwm@d401bc00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401bc00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM7>; ++ resets = <&reset RESET_PWM7>; ++ status = "disabled"; ++ }; ++ ++ pwm8: pwm@d4020000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM8>; ++ resets = <&reset RESET_PWM8>; ++ status = "disabled"; ++ }; ++ ++ pwm9: pwm@d4020400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM9>; ++ resets = <&reset RESET_PWM9>; ++ status = "disabled"; ++ }; ++ ++ pwm10: pwm@d4020800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM10>; ++ resets = <&reset RESET_PWM10>; ++ status = "disabled"; ++ }; ++ ++ pwm11: pwm@d4020c00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020c00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM11>; ++ resets = <&reset RESET_PWM11>; ++ status = "disabled"; ++ }; ++ ++ pwm12: pwm@d4021000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM12>; ++ resets = <&reset RESET_PWM12>; ++ status = "disabled"; ++ }; ++ ++ pwm13: pwm@d4021400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM13>; ++ resets = <&reset RESET_PWM13>; ++ status = "disabled"; ++ }; ++ ++ pwm14: pwm@d4021800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM14>; ++ resets = <&reset RESET_PWM14>; ++ status = "disabled"; ++ }; ++ ++ pwm15: pwm@d4021c00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021c00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM15>; ++ resets = <&reset RESET_PWM15>; ++ status = "disabled"; ++ }; ++ ++ pwm16: pwm@d4022000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM16>; ++ resets = <&reset RESET_PWM16>; ++ status = "disabled"; ++ }; ++ ++ pwm17: pwm@d4022400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM17>; ++ resets = <&reset 
RESET_PWM17>; ++ status = "disabled"; ++ }; ++ ++ pwm18: pwm@d4022800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM18>; ++ resets = <&reset RESET_PWM18>; ++ status = "disabled"; ++ }; ++ ++ pwm19: pwm@d4022c00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022c00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM19>; ++ resets = <&reset RESET_PWM19>; ++ status = "disabled"; ++ }; ++ ++ gpio: gpio@d4019000 { ++ compatible = "spacemit,k1x-gpio"; ++ reg = <0x0 0xd4019000 0x0 0x800>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupts = <58>; ++ clocks = <&ccu CLK_GPIO>; ++ interrupt-names = "gpio_mux"; ++ interrupt-parent = <&intc>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ ++ gcb0: gpio0 { ++ reg-offset = <0x0>; ++ }; ++ ++ gcb1: gpio1 { ++ reg-offset = <0x4>; ++ }; ++ ++ gcb2: gpio2 { ++ reg-offset = <0x8>; ++ }; ++ ++ gcb3: gpio3 { ++ reg-offset = <0x100>; ++ }; ++ }; ++ ++ sdhci0: sdh@d4280000 { ++ compatible = "spacemit,k1-sdhci"; ++ reg = <0x0 0xd4280000 0x0 0x200>; ++ interrupts = <99>; ++ interrupt-parent = <&intc>; ++ resets = <&reset RESET_SDH_AXI>, ++ <&reset RESET_SDH0>; ++ reset-names = "sdh_axi", "sdh0"; ++ clocks = <&ccu CLK_SDH0>, ++ <&ccu CLK_SDH_AXI>, ++ <&ccu CLK_AIB>; ++ clock-names = "sdh-io", "sdh-core","aib-clk"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ sdhci1: sdh@d4280800 { ++ compatible = "spacemit,k1-sdhci"; ++ reg = <0x0 0xd4280800 0x0 0x200>; ++ interrupts = <100>; ++ interrupt-parent = <&intc>; ++ resets = <&reset RESET_SDH_AXI>, ++ <&reset RESET_SDH1>; ++ reset-names = "sdh_axi", "sdh1"; ++ clocks = <&ccu CLK_SDH1>, ++ <&ccu CLK_SDH_AXI>; ++ clock-names = "sdh-io", "sdh-core"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ sdhci2: sdh@d4281000 { ++ compatible = "spacemit,k1-sdhci"; ++ reg = <0x0 0xd4281000 0x0 0x200>; ++ interrupts = <101>; ++ interrupt-parent = <&intc>; ++ resets = <&reset RESET_SDH_AXI>, ++ <&reset RESET_SDH2>; ++ reset-names = "sdh_axi", "sdh2"; ++ clocks = <&ccu CLK_SDH2>, ++ <&ccu CLK_SDH_AXI>; ++ clock-names = "sdh-io", "sdh-core"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ eth0: ethernet@cac80000 { ++ compatible = "spacemit,k1-emac"; ++ reg = <0x0 0xCAC80000 0x0 0x420>; ++ k1,apmu-base-reg = <0xD4282800>; ++ ctrl-reg = <0x3e4>; ++ dline-reg = <0x3e8>; ++ clocks = <&ccu CLK_EMAC0_BUS>, <&ccu CLK_EMAC0_PTP>; ++ clock-names = "emac-clk", "ptp-clk"; ++ resets = <&reset RESET_EMAC0>; ++ reset-names = "emac-reset"; ++ interrupts-extended = <&intc 131>; ++ mac-address = [ 00 00 00 00 00 00 ]; ++ ptp-support; ++ ptp-clk-rate = <10000000>; ++ interconnects = <&dram_range1>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ eth1: ethernet@cac81000 { ++ compatible = "spacemit,k1-emac"; ++ reg = <0x0 0xCAC81000 0x0 0x420>; ++ k1,apmu-base-reg = <0xD4282800>; ++ ctrl-reg = <0x3ec>; ++ dline-reg = <0x3f0>; ++ clocks = <&ccu CLK_EMAC1_BUS>, <&ccu CLK_EMAC1_PTP>; ++ clock-names = "emac-clk", "ptp-clk"; ++ resets = <&reset RESET_EMAC1>; ++ reset-names = "emac-reset"; ++ interrupts-extended = <&intc 133>; ++ mac-address = [ 00 00 00 00 00 00 ]; ++ ptp-support; ++ ptp-clk-rate = <10000000>; ++ interconnects = <&dram_range1>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ }; ++}; +diff --git 
a/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi b/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi +new file mode 100644 +index 000000000000..46b826f6b681 +--- /dev/null ++++ b/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi +@@ -0,0 +1,1192 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* Copyright (c) 2023 Spacemit, Inc */ ++ ++#include ++/* Pin Configuration Node: */ ++/* Format: */ ++&pinctrl { ++ pinctrl_uart0_0: uart0_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart0_1: uart0_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_CMD, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_80, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart0_2: uart0_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_68, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_69, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart2: uart2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_21, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_22, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_23, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_24, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart3_0: uart3_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_82, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_83, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_84, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart3_1: uart3_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_18, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_19, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_20, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_21, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart3_2: uart3_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_54, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_56, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_0: uart4_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(QSPI_DAT1, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT0, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart4_1: uart4_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_82, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_83, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_84, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_2: uart4_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_23, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_24, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_3: uart4_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_33, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_34, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_35, 
MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_36, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_4: uart4_4_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_111, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_112, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_113, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_114, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart5_0: uart5_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(QSPI_CLK, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_CSI, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart5_1: uart5_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_25, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_26, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_27, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_28, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart5_2: uart5_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_42, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_43, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_44, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_45, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart5_3: uart5_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TDI, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TCK, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart6_0: uart6_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_85, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_86, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_87, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart6_1: uart6_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_00, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_01, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_02, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_03, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart6_2: uart6_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart7_0: uart7_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_88, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_89, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart7_1: uart7_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_04, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_05, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_06, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_07, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart8_0: uart8_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_82, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_83, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; 
++ ++ pinctrl_uart8_1: uart8_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_08, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_09, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_10, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_11, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart8_2: uart8_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_75, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_77, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_78, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart9_0: uart9_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_12, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_13, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart9_1: uart9_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_110, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_115, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_116, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_117, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart9_2: uart9_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TCK, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c0: i2c0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_54, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_r_uart1: r_uart1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_49, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_50, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_51, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_52, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_i2c1: i2c1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c2_0: i2c2_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_84, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_85, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c2_1: i2c2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TDI, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c2_2: i2c2_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_68, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_69, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c3_0: i2c3_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_38, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_39, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c3_1: i2c3_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_47, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_48, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c3_2: i2c3_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_77, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_78, 
MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c4_0: i2c4_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_40, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_41, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c4_1: i2c4_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_75, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ K1X_PADCONF(GPIO_76, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c4_2: i2c4_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_51, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ K1X_PADCONF(GPIO_52, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c5_0: i2c5_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_82, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c5_1: i2c5_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_54, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c6_0: i2c6_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_83, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c6_1: i2c6_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_118, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_119, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c6_2: i2c6_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c7: i2c7_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_118, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_119, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c8: i2c8_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PWR_SCL, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PWR_SDA, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_one_wire_0: one_wire_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_110, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_one_wire_1: one_wire_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_47, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ir_rx_0: ir_rx_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(DVL1, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_ir_rx_1: ir_rx_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_79, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ir_rx_2: ir_rx_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_58, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_r_ir_rx_0: r_ir_rx_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_48, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_r_ir_rx_1: r_ir_rx_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_44, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm0_0: pwm0_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm0_1: pwm0_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_14, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ 
>; ++ }; ++ ++ pinctrl_pwm0_2: pwm0_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_22, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm1_0: pwm1_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm1_1: pwm1_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_29, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm1_2: pwm1_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_23, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm2_0: pwm2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT1, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm2_1: pwm2_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_22, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm2_2: pwm2_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_30, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm2_3: pwm2_3_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_24, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm3_0: pwm3_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT0, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm3_1: pwm3_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_33, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm3_2: pwm3_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_25, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm4_0: pwm4_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_CMD, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm4_1: pwm4_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_34, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm5_0: pwm5_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_CLK, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm5_1: pwm5_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_35, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm6_0: pwm6_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_88, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm6_1: pwm6_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_36, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm7_0: pwm7_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_92, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm7_1: pwm7_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_37, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm8_0: pwm8_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_00, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm8_1: pwm8_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_38, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm9_0: pwm9_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_01, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm9_1: pwm9_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_39, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm10_0: pwm10_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_02, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm10_1: pwm10_1_grp { ++ 
pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_40, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm11_0: pwm11_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_03, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm11_1: pwm11_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_41, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm12_0: pwm12_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_04, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm12_1: pwm12_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_42, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm13_0: pwm13_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_05, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm13_1: pwm13_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_43, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm14_0: pwm14_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_06, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm14_1: pwm14_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_44, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm15_0: pwm15_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_07, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm15_1: pwm15_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_45, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm16_0: pwm16_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_09, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm16_1: pwm16_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_46, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm17_0: pwm17_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_10, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm17_1: pwm17_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm18_0: pwm18_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_11, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm18_1: pwm18_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_57, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm19_0: pwm19_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_13, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm19_1: pwm19_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_63, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_rpwm2_0: rpwm2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_79, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_rpwm9_0: rpwm9_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_74, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_sspa0_0: sspa0_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_118, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_119, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_120, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_121, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_122, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ >; ++ }; ++ ++ pinctrl_sspa0_1: sspa0_1_grp { ++ pinctrl-single,pins =< ++ 
K1X_PADCONF(GPIO_58, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_111, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_112, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_113, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_114, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ >; ++ }; ++ ++ pinctrl_sspa1: sspa1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_24, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_25, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_26, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_27, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_28, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ >; ++ }; ++ ++ pinctrl_ssp2_0: ssp2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_75, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_77, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_78, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ssp2_1: ssp2_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_64, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_65, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_66, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_67, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ssp3_0: ssp3_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_75, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_77, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_78, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ssp3_1: ssp3_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_59, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_60, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_61, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_62, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_qspi: qspi_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(QSPI_DAT3, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT2, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT1, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT0, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_CLK, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_CSI, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_mmc1: mmc1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_DAT1, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_DAT0, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_CMD, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_CLK, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_mmc1_fast: mmc1_fast_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_DAT1, MUX_MODE0, (EDGE_NONE | PULL_UP | 
PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_DAT0, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_CMD, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_CLK, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS3)) ++ >; ++ }; ++ ++ pinctrl_mmc2: mmc2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_15, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_16, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_17, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_18, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_19, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_20, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb0_0: usb0_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_125, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_126, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_127, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb0_1: usb0_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_64, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_65, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_63, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb1_0: usb1_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_124, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb1_1: usb1_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_66, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb2_0: usb2_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_121, MUX_MODE2, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_122, MUX_MODE2, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_123, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb2_1: usb2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_68, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_69, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_67, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_0: pcie0_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_15, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_16, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_17, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_1: pcie0_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_29, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_30, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_31, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_2: pcie0_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_110, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_115, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_116, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_3: pcie0_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_54, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_0: pcie1_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_15, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ 
K1X_PADCONF(GPIO_16, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_17, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_1: pcie1_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_32, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_33, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_34, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_2: pcie1_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_58, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_3: pcie1_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_59, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_60, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_61, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_0: pcie2_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_18, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_19, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_20, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_1: pcie2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_35, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_36, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_37, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_2: pcie2_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_62, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_74, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_117, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_3: pcie2_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_111, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_112, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_113, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_4: pcie2_4_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_62, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_112, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_117, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_gmac0: gmac0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_00, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_01, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_02, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_03, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_04, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_05, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_06, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_07, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_08, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_09, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_10, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_11, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_12, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_13, 
MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_14, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_45, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_gmac1: gmac1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_29, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_30, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_31, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_32, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_33, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_34, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_35, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_36, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_37, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_38, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_39, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_40, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_41, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_42, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_43, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_46, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_can_0: can_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_75, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_can_1: can_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_54, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_r_can_0: r_can_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_47, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_48, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_r_can_1: r_can_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_110, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_115, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_hdmi_0: hdmi_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_86, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_87, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_88, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_89, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_hdmi_1: hdmi_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_59, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_60, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_61, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_62, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_spi_lcd_0: spi_lcd_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_86, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_87, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_88, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_89, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_91, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) 
++ K1X_PADCONF(GPIO_92, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_spi_lcd_1: spi_lcd_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TDI, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TCK, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_74, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_114, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_63, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_camera0: camera0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_camera1: camera1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_58, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_camera2: camera2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_120, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pmic: pmic_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(VCXO_EN, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(DVL0, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(DVL1, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_0: mn_clk_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_92, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_1: mn_clk_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_2: mn_clk_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_44, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_3: mn_clk_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_20, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_4: mn_clk_4_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_23, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_5: mn_clk_5_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_32, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk2_0: mn_clk2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_91, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk2_1: mn_clk2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_85, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_0: vcxo_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(DVL0, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(DVL1, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_1: vcxo_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_16, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_17, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_2: vcxo_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_89, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_out_0: vcxo_out_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_91, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_out_1: vcxo_out_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_12, MUX_MODE3, (EDGE_NONE | PULL_DIS | 
PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_32k_out_0: 32k_out_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_21, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_32k_out_1: 32k_out_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_31, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_32k_out_2: 32k_out_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_28, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pri: pri_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TDI, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TCK, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++}; ++ diff --git a/arch/riscv/boot/dts/thead/Makefile b/arch/riscv/boot/dts/thead/Makefile index b55a17127c2b..3e6311bc9976 100644 --- a/arch/riscv/boot/dts/thead/Makefile @@ -17325,8 +22064,769 @@ index ff364709a6df..a47bf9f15d9a 100644 + }; }; }; +diff --git a/arch/riscv/boot/dts/ultrarisc/Makefile b/arch/riscv/boot/dts/ultrarisc/Makefile +new file mode 100644 +index 000000000000..ef70e28e0b65 +--- /dev/null ++++ b/arch/riscv/boot/dts/ultrarisc/Makefile +@@ -0,0 +1,2 @@ ++# SPDX-License-Identifier: GPL-2.0 ++dtb-y += dp1000.dtb dp1000-evb-v1.dtb +diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-evb-pinctrl.dtsi b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-pinctrl.dtsi +new file mode 100644 +index 000000000000..b443b3fd48a8 +--- /dev/null ++++ b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-pinctrl.dtsi +@@ -0,0 +1,149 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * UltraRISC DP1000 pinctrl device Tree Source ++ * for DP1000 EVB V1.0 ++ * ++ * Copyright(C) 2025 UltraRISC Technology (Shanghai) Co., Ltd. 
++ * ++ */ ++#include ++ ++/ { ++ ++ soc { ++ pmx0: pinmux@11081000 { ++ compatible = "ultrarisc,dp1000-pinctrl"; ++ reg = <0x0 0x11081000 0x0 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #pinctrl-cells = <2>; ++ pinctrl-single,register-width = <32>; ++ pinctrl-single,function-mask = <0x3ff>; ++ pinctrl-use-default; ++ ++ i2c0_pins: i2c0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 12 UR_FUNC0 ++ UR_DP1000_IOMUX_A 13 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 12 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 13 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ i2c1_pins: i2c1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 6 UR_FUNC0 ++ UR_DP1000_IOMUX_B 7 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ i2c2_pins: i2c2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 0 UR_FUNC0 ++ UR_DP1000_IOMUX_C 1 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ i2c3_pins: i2c3_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 2 UR_FUNC0 ++ UR_DP1000_IOMUX_C 3 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ uart0_pins: uart0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 8 UR_FUNC0 ++ UR_DP1000_IOMUX_A 9 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 8 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 9 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ uart1_pins: uart1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 4 UR_FUNC0 ++ UR_DP1000_IOMUX_B 5 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ uart2_pins: uart2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 4 UR_FUNC0 ++ UR_DP1000_IOMUX_C 5 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ spi0_pins: spi0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_D 0 UR_FUNC1 ++ UR_DP1000_IOMUX_D 1 UR_FUNC1 ++ UR_DP1000_IOMUX_D 2 UR_FUNC1 ++ UR_DP1000_IOMUX_D 3 UR_FUNC1 ++ UR_DP1000_IOMUX_D 4 UR_FUNC1 ++ UR_DP1000_IOMUX_D 5 UR_FUNC1 ++ UR_DP1000_IOMUX_D 6 UR_FUNC1 ++ UR_DP1000_IOMUX_D 7 UR_FUNC1 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_D 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ spi1_pins: spi1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 0 UR_FUNC0 ++ UR_DP1000_IOMUX_A 1 UR_FUNC0 ++ UR_DP1000_IOMUX_A 2 UR_FUNC0 ++ UR_DP1000_IOMUX_A 3 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 2 
UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ }; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts +new file mode 100644 +index 000000000000..c2ca8ae53dda +--- /dev/null ++++ b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts +@@ -0,0 +1,53 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * UltraRISC DP1000 device Tree Source ++ * for DP1000 EVB V1.0 ++ * ++ * Copyright (C) 2025 UltraRISC Technology (Shanghai) Co., Ltd. ++ */ ++ ++#include "dp1000.dts" ++#include "dp1000-evb-pinctrl.dtsi" ++#include ++ ++&i2c0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c0_pins>; ++}; ++ ++&i2c1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c1_pins>; ++}; ++ ++&i2c3 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c3_pins>; ++}; ++ ++&spi0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&spi0_pins>; ++}; ++ ++&spi1 { ++ num-cs = <1>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&spi1_pins>; ++}; ++ ++&uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart0_pins>; ++}; ++ ++&uart1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart1_pins>; ++}; ++ ++&uart2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart2_pins>; ++}; +diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000.dts b/arch/riscv/boot/dts/ultrarisc/dp1000.dts +new file mode 100644 +index 000000000000..7e753f891d7b +--- /dev/null ++++ b/arch/riscv/boot/dts/ultrarisc/dp1000.dts +@@ -0,0 +1,533 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright (c) 2019-2022 UltraRISC Technology (Shanghai) Co., Ltd. ++ * ++ */ ++ ++/dts-v1/; ++ ++/ { ++ #address-cells = <0x02>; ++ #size-cells = <0x02>; ++ compatible = "ultrarisc,dp1000"; ++ model = "ultrarisc,dp1000"; ++ ++ chosen { ++ bootargs = "earlycon=sbi console=ttyS1,115200"; ++ stdout-path = &uart1; ++ }; ++ ++ cpus { ++ #address-cells = <0x01>; ++ #size-cells = <0x00>; ++ timebase-frequency = <10000000>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ reg = <0x00>; ++ status = "okay"; ++ compatible = "riscv"; ++ riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu0_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ reg = <0x1>; ++ status = "okay"; ++ compatible = "riscv"; ++ riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu1_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ cpu2: cpu@2 { ++ device_type = "cpu"; ++ reg = <0x2>; ++ status = "okay"; ++ compatible = "riscv"; ++ riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu2_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ cpu3: cpu@3 { ++ device_type = "cpu"; ++ reg = <0x3>; ++ status = "okay"; ++ compatible = "riscv"; ++ riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu3_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ cpu4: cpu@4 { ++ device_type = "cpu"; ++ reg = <0x10>; ++ status = "okay"; ++ compatible = "riscv"; ++ 
riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu4_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ cpu5: cpu@5 { ++ device_type = "cpu"; ++ reg = <0x11>; ++ status = "okay"; ++ compatible = "riscv"; ++ riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu5_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ cpu6: cpu@6 { ++ device_type = "cpu"; ++ reg = <0x12>; ++ status = "okay"; ++ compatible = "riscv"; ++ riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu6_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ cpu7: cpu@7 { ++ device_type = "cpu"; ++ reg = <0x13>; ++ status = "okay"; ++ compatible = "riscv"; ++ riscv,isa = "rv64imafdcbh"; ++ mmu-type = "riscv,sv48"; ++ clock-frequency = <2000000000>; ++ cpu7_intc:interrupt-controller { ++ #address-cells = <0x01>; ++ interrupt-controller; ++ compatible = "riscv,cpu-intc"; ++ #interrupt-cells = <0x01>; ++ }; ++ }; ++ }; ++ ++ memory@80000000 { ++ device_type = "memory"; ++ reg = <0x00 0x80000000 0x4 0x00000000>; ++ }; ++ ++ soc { ++ #address-cells = <0x02>; ++ #size-cells = <0x02>; ++ compatible = "simple-bus"; ++ ranges; ++ ++ clocks { ++ compatible = "simple-bus"; ++ u-boot,dm-pre-reloc; ++ device_clk: device_clk { ++ compatible = "fixed-clock"; ++ clock-frequency = <62500000>; ++ #clock-cells = <0>; ++ }; ++ csr_clk: csr_clk { ++ compatible = "fixed-clock"; ++ clock-frequency = <250000000>; ++ #clock-cells = <0>; ++ }; ++ }; ++ ++ clint: clint@8000000 { ++ compatible = "riscv,clint0"; ++ interrupts-extended = <&cpu0_intc 0x03>, <&cpu0_intc 0x07>, ++ <&cpu1_intc 0x03>, <&cpu1_intc 0x07>, ++ <&cpu2_intc 0x03>, <&cpu2_intc 0x07>, ++ <&cpu3_intc 0x03>, <&cpu3_intc 0x07>, ++ <&cpu4_intc 0x03>, <&cpu4_intc 0x07>, ++ <&cpu5_intc 0x03>, <&cpu5_intc 0x07>, ++ <&cpu6_intc 0x03>, <&cpu6_intc 0x07>, ++ <&cpu7_intc 0x03>, <&cpu7_intc 0x07>; ++ reg = <0x00 0x8000000 0x00 0x100000>; ++ }; ++ ++ plic: plic@9000000 { ++ #interrupt-cells = <1>; ++ #address-cells = <0>; ++ phandle = <0x01>; ++ compatible = "ultrarisc,dp1000-plic"; ++ interrupt-controller; ++ interrupts-extended = <&cpu0_intc 0xb>, <&cpu0_intc 0x9>, <&cpu0_intc 0xa>, ++ <&cpu1_intc 0xb>, <&cpu1_intc 0x9>, <&cpu1_intc 0xa>, ++ <&cpu2_intc 0xb>, <&cpu2_intc 0x9>, <&cpu2_intc 0xa>, ++ <&cpu3_intc 0xb>, <&cpu3_intc 0x9>, <&cpu3_intc 0xa>, ++ <&cpu4_intc 0xb>, <&cpu4_intc 0x9>, <&cpu4_intc 0xa>, ++ <&cpu5_intc 0xb>, <&cpu5_intc 0x9>, <&cpu5_intc 0xa>, ++ <&cpu6_intc 0xb>, <&cpu6_intc 0x9>, <&cpu6_intc 0xa>, ++ <&cpu7_intc 0xb>, <&cpu7_intc 0x9>, <&cpu7_intc 0xa>; ++ reg = <0x00 0x9000000 0x00 0x4000000>; ++ riscv,max-priority = <0x07>; ++ riscv,ndev = <160>; ++ }; ++ ++ uart0: serial@20300000 { ++ interrupt-parent = <0x01>; ++ interrupts = <17>; ++ clock-frequency = <62500000>; ++ current-speed = <115200>; ++ reg = <0x00 0x20300000 0x00 0x10000>; ++ compatible = "ultrarisc,dp1000-uart","ns16550"; ++ reg-offset = <0x0>; ++ reg-shift = <0x02>; ++ }; ++ ++ uart1: serial@20310000 { ++ interrupt-parent = <0x01>; ++ interrupts = <18>; ++ clock-frequency = <62500000>; ++ current-speed = <115200>; ++ reg = <0x00 0x20310000 0x00 0x10000>; ++ 
compatible = "ultrarisc,dp1000-uart","ns16550"; ++ reg-offset = <0x0>; ++ reg-shift = <0x02>; ++ }; ++ ++ uart2: serial@20400000 { ++ interrupt-parent = <0x01>; ++ interrupts = <25>; ++ clock-frequency = <62500000>; ++ current-speed = <115200>; ++ reg = <0x00 0x20400000 0x00 0x10000>; ++ compatible = "ultrarisc,dp1000-uart","ns16550"; ++ reg-offset = <0x0>; ++ reg-shift = <0x02>; ++ }; ++ ++ uart3: serial@20410000 { ++ interrupt-parent = <0x01>; ++ interrupts = <26>; ++ clock-frequency = <62500000>; ++ current-speed = <115200>; ++ reg = <0x00 0x20410000 0x00 0x10000>; ++ compatible = "ultrarisc,dp1000-uart","ns16550"; ++ reg-offset = <0x0>; ++ reg-shift = <0x02>; ++ }; ++ ++ spi0: spi@20320000 { ++ compatible = "baikal,bt1-ssi","snps,dw-apb-ssi"; ++ status = "okay"; ++ #address-cells = <0x01>; ++ #size-cells = <0x00>; ++ reg = <0x0 0x20320000 0x0 0x1000>; ++ interrupt-parent = <0x01>; ++ interrupts = <19>; ++ clocks = <&device_clk>; ++ clock-names = "device_clk"; ++ num-cs = <3>; ++ spi-max-frequency = <62500000>; ++ mmc0: mmc@0 { ++ compatible = "mmc-spi-slot"; ++ spi-max-frequency = <15625000>; ++ reg = <0x00>; ++ voltage-ranges = <3300 3300>; ++ disable-wp; ++ }; ++ }; ++ ++ spi1: spi@20420000 { ++ compatible = "baikal,bt1-ssi","snps,dw-apb-ssi"; ++ status = "okay"; ++ #address-cells = <0x01>; ++ #size-cells = <0x00>; ++ reg = <0x0 0x20420000 0x0 0x1000>; ++ interrupt-parent = <0x01>; ++ interrupts = <27>; ++ clocks = <&device_clk>; ++ clock-names = "device_clk"; ++ num-cs = <3>; ++ spi-max-frequency = <62500000>; ++ }; ++ ++ i2c0: i2c@20330000{ ++ compatible = "snps,designware-i2c"; ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20330000 0x0 0x100>; ++ clock-frequency = <400000>; ++ clocks = <&device_clk>; ++ interrupt-parent = <0x01>; ++ interrupts = <20>; ++ }; ++ ++ i2c1: i2c@20340000{ ++ compatible = "snps,designware-i2c"; ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20340000 0x0 0x100>; ++ clock-frequency = <400000>; ++ clocks = <&device_clk>; ++ interrupt-parent = <0x01>; ++ interrupts = <21>; ++ }; ++ ++ i2c2: i2c@20430000{ ++ compatible = "snps,designware-i2c"; ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20430000 0x0 0x100>; ++ clock-frequency = <400000>; ++ clocks = <&device_clk>; ++ interrupt-parent = <0x01>; ++ interrupts = <28>; ++ }; ++ ++ i2c3: i2c@20440000{ ++ compatible = "snps,designware-i2c"; ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20440000 0x0 0x100>; ++ clock-frequency = <400000>; ++ clocks = <&device_clk>; ++ interrupt-parent = <0x01>; ++ interrupts = <29>; ++ }; ++ ++ wdt0: watchdog@20210000 { ++ compatible = "snps,dw-wdt"; ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20210000 0x0 0x100>; ++ interrupt-parent = <0x01>; ++ interrupts = <33>; ++ clocks = <&device_clk>; ++ }; ++ ++ timer0: timer@20220000 { ++ compatible = "snps,dw-apb-timer"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20220000 0x0 0x100>; ++ clocks = <&device_clk>; ++ interrupt-parent = <0x01>; ++ interrupts = <35>; ++ status = "okay"; ++ }; ++ ++ timer1: timer@20230000 { ++ compatible = "snps,dw-apb-timer"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20230000 0x0 0x100>; ++ clocks = <&device_clk>; ++ interrupt-parent = <0x01>; ++ interrupts = <36>; ++ status = "okay"; ++ }; ++ ++ gpio: gpio@20200000 { ++ compatible = "snps,dw-apb-gpio"; ++ #address-cells = <1>; ++ 
#size-cells = <0>; ++ reg = <0x0 0x20200000 0x0 0x1000>; ++ clocks = <&csr_clk>, <&device_clk>; ++ clock-names = "bus", "db"; ++ status = "okay"; ++ ++ porta: gpio-port@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ reg = <0>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <16>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ interrupt-parent = <0x01>; ++ interrupts = <34>; ++ }; ++ ++ portb: gpio-port@1 { ++ compatible = "snps,dw-apb-gpio-port"; ++ reg = <1>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ }; ++ ++ portc: gpio-port@2 { ++ compatible = "snps,dw-apb-gpio-port"; ++ reg = <2>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ }; ++ ++ portd: gpio-port@3 { ++ compatible = "snps,dw-apb-gpio-port"; ++ reg = <3>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <8>; ++ }; ++ }; ++ ++ ethernet1@38000000 { ++ clocks = <&csr_clk>; ++ clock-names = "stmmaceth"; ++ compatible = "ultrarisc,dp1000-gmac", "snps,dwmac-5.10a"; ++ interrupt-parent = <0x01>; ++ interrupts = <84>; ++ interrupt-names = "macirq"; ++ reg = <0x00 0x38000000 0x00 0x1000000>; ++ local-mac-address = [ff ff ff ff ff ff]; ++ phy-mode = "rgmii"; ++ max-speed = <1000>; ++ snps,txpbl = <8>; ++ snps,rxpbl = <8>; ++ phy-handle = <&phy0>; ++ mdio { ++ #address-cells = <0x01>; ++ #size-cells = <0x00>; ++ compatible = "snps,dwmac-mdio"; ++ phy0: phy@0{ ++ phandle = <0x04>; ++ reg = <0x00>; ++ status = "okay"; ++ }; ++ }; ++ }; ++ ++ dmac: dma-controller@39000000 { ++ compatible = "snps,axi-dma-1.01a"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x39000000 0x0 0x400>; ++ clocks = <&device_clk>, <&device_clk>; ++ clock-names = "core-clk", "cfgr-clk"; ++ interrupt-parent = <0x01>; ++ interrupts = <152>; ++ #dma-cells = <1>; ++ dma-channels = <8>; ++ snps,dma-masters = <1>; ++ snps,data-width = <4>; ++ snps,block-size = <512 512 512 512 512 512 512 512>; ++ snps,priority = <0 1 2 3 4 5 6 7>; ++ snps,axi-max-burst-len = <256>; ++ }; ++ ++ pcie_x16: pcie@21000000 { ++ compatible = "ultrarisc,dw-pcie"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x21000000 0x0 0x01000000>, /* IP registers */ ++ <0x0 0x4fff0000 0x0 0x00010000>; /* Configuration space */ ++ reg-names = "dbi", "config"; ++ device_type = "pci"; ++ dma-coherent; ++ bus-range = <0x0 0xff>; ++ num-lanes = <16>; ++ ranges = <0x81000000 0x0 0x4fbf0000 0x0 0x4fbf0000 0x0 0x00400000>, ++ <0x82000000 0x0 0x40000000 0x0 0x40000000 0x0 0x0fbf0000>, ++ <0xc3000000 0x40 0x00000000 0x40 0x00000000 0xd 0x00000000>; ++ max-link-speed = <4>; ++ interrupt-parent = <&plic>; ++ interrupts = <43>, <44>, <45>, <46>, <47>, <48>; ++ interrupt-names = "msi", "inta", "intb", "intc", "intd", "aer"; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &plic 44>, ++ <0x0 0x0 0x0 0x2 &plic 45>, ++ <0x0 0x0 0x0 0x3 &plic 46>, ++ <0x0 0x0 0x0 0x4 &plic 47>; ++ }; ++ ++ pcie_x4a: pcie@23000000 { ++ compatible = "ultrarisc,dw-pcie"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x23000000 0x0 0x01000000>, /* IP registers */ ++ <0x0 0x6fff0000 0x0 0x00010000>; /* Configuration space */ ++ reg-names = "dbi", "config"; ++ device_type = "pci"; ++ dma-coherent; ++ bus-range = <0x0 0xff>; ++ num-lanes = <4>; ++ ranges = <0x81000000 0x0 0x6fbf0000 0x0 0x6fbf0000 0x0 0x00400000>, ++ <0x82000000 0x0 0x60000000 0x0 0x60000000 0x0 0x0fbf0000>, ++ <0xc3000000 0x80 0x00000000 0x80 0x00000000 0xd 0x00000000>; ++ 
max-link-speed = <4>; ++ interrupt-parent = <&plic>; ++ interrupts = <63>, <64>, <65>, <66>, <67>, <68>; ++ interrupt-names = "msi", "inta", "intb", "intc", "intd", "aer"; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &plic 64>, ++ <0x0 0x0 0x0 0x2 &plic 65>, ++ <0x0 0x0 0x0 0x3 &plic 66>, ++ <0x0 0x0 0x0 0x4 &plic 67>; ++ }; ++ ++ pcie_x4b: pcie@24000000 { ++ compatible = "ultrarisc,dw-pcie"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ #interrupt-cells = <1>; ++ reg = <0x0 0x24000000 0x0 0x01000000>, /* IP registers */ ++ <0x0 0x7fff0000 0x0 0x00010000>; /* Configuration space */ ++ reg-names = "dbi", "config"; ++ device_type = "pci"; ++ dma-coherent; ++ bus-range = <0x0 0xff>; ++ num-lanes = <4>; ++ ranges = <0x81000000 0x0 0x7fbf0000 0x0 0x7fbf0000 0x0 0x00400000>, ++ <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x0fbf0000>, ++ <0xc3000000 0xc0 0x00000000 0xc0 0x00000000 0xd 0x00000000>; ++ max-link-speed = <4>; ++ interrupt-parent = <&plic>; ++ interrupts = <73>, <74>, <75>, <76>, <77>, <78>; ++ interrupt-names = "msi", "inta", "intb", "intc", "intd", "aer"; ++ interrupt-map-mask = <0x0 0x0 0x0 0x7>; ++ interrupt-map = <0x0 0x0 0x0 0x1 &plic 74>, ++ <0x0 0x0 0x0 0x2 &plic 75>, ++ <0x0 0x0 0x0 0x3 &plic 76>, ++ <0x0 0x0 0x0 0x4 &plic 77>; ++ }; ++ }; ++}; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig -index ab86ec3b9eab..ee97fe565df7 100644 +index ab86ec3b9eab..33159406ee58 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -28,6 +28,7 @@ CONFIG_PROFILING=y @@ -17337,7 +22837,15 @@ index ab86ec3b9eab..ee97fe565df7 100644 CONFIG_SOC_SIFIVE=y CONFIG_SOC_STARFIVE=y CONFIG_ARCH_SUNXI=y -@@ -142,6 +143,13 @@ CONFIG_SPI_SUN6I=y +@@ -36,6 +37,7 @@ CONFIG_SMP=y + CONFIG_HOTPLUG_CPU=y + CONFIG_PM=y + CONFIG_CPU_IDLE=y ++CONFIG_ACPI_CPPC_CPUFREQ=m + CONFIG_VIRTUALIZATION=y + CONFIG_KVM=m + CONFIG_ACPI=y +@@ -142,6 +144,13 @@ CONFIG_SPI_SUN6I=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_GPIO_SIFIVE=y CONFIG_WATCHDOG=y @@ -17351,7 +22859,7 @@ index ab86ec3b9eab..ee97fe565df7 100644 CONFIG_SUNXI_WATCHDOG=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y -@@ -168,21 +176,25 @@ CONFIG_MMC=y +@@ -168,21 +177,25 @@ CONFIG_MMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_CADENCE=y @@ -17377,7 +22885,7 @@ index ab86ec3b9eab..ee97fe565df7 100644 CONFIG_ARCH_R9A07G043=y CONFIG_PHY_SUN4I_USB=m CONFIG_LIBNVDIMM=y -@@ -238,5 +250,13 @@ CONFIG_DEBUG_SG=y +@@ -238,5 +251,13 @@ CONFIG_DEBUG_SG=y # CONFIG_RCU_TRACE is not set CONFIG_RCU_EQS_DEBUG=y # CONFIG_FTRACE is not set @@ -17392,8 +22900,5581 @@ index ab86ec3b9eab..ee97fe565df7 100644 +CONFIG_MAILBOX=y +# TH1520 PMIC_WDT +CONFIG_TH1520_PMIC_WATCHDOG=y +diff --git a/arch/riscv/configs/dp1000_defconfig b/arch/riscv/configs/dp1000_defconfig +new file mode 100644 +index 000000000000..a1ea4921ed39 +--- /dev/null ++++ b/arch/riscv/configs/dp1000_defconfig +@@ -0,0 +1,5530 @@ ++CONFIG_LOCALVERSION="-ur-dp1000-rvck" ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_WATCH_QUEUE=y ++CONFIG_USELIB=y ++# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_BPF_JIT=y ++CONFIG_BPF_JIT_ALWAYS_ON=y ++CONFIG_BPF_LSM=y ++CONFIG_PREEMPT_VOLUNTARY=y ++CONFIG_VIRT_CPU_ACCOUNTING_GEN=y ++CONFIG_BSD_PROCESS_ACCT=y ++CONFIG_BSD_PROCESS_ACCT_V3=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_PSI=y ++CONFIG_IKCONFIG=y 
++CONFIG_IKCONFIG_PROC=y ++CONFIG_IKHEADERS=m ++CONFIG_LOG_BUF_SHIFT=18 ++CONFIG_NUMA_BALANCING=y ++CONFIG_MEMCG=y ++CONFIG_BLK_CGROUP=y ++CONFIG_CFS_BANDWIDTH=y ++CONFIG_CGROUP_PIDS=y ++CONFIG_CGROUP_RDMA=y ++CONFIG_CGROUP_FREEZER=y ++CONFIG_CGROUP_HUGETLB=y ++CONFIG_CPUSETS=y ++CONFIG_CGROUP_DEVICE=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_PERF=y ++CONFIG_CGROUP_BPF=y ++CONFIG_CGROUP_MISC=y ++CONFIG_NAMESPACES=y ++CONFIG_USER_NS=y ++CONFIG_CHECKPOINT_RESTORE=y ++CONFIG_SCHED_AUTOGROUP=y ++CONFIG_EXPERT=y ++CONFIG_SGETMASK_SYSCALL=y ++CONFIG_PC104=y ++CONFIG_PROFILING=y ++CONFIG_KEXEC=y ++CONFIG_KEXEC_FILE=y ++CONFIG_CRASH_DUMP=y ++CONFIG_SOC_MICROCHIP_POLARFIRE=y ++CONFIG_SOC_SIFIVE=y ++CONFIG_SOC_STARFIVE=y ++CONFIG_ARCH_ULTRARISC=y ++CONFIG_SOC_VIRT=y ++CONFIG_ERRATA_THEAD=y ++CONFIG_SMP=y ++CONFIG_NR_CPUS=8 ++# CONFIG_HIGHMEM is not set ++CONFIG_NUMA=y ++CONFIG_RISCV_SBI_V01=y ++CONFIG_CMDLINE="earlycon=sbi console=ttyS1,115200n8 console=tty0 root=PARTLABEL=ur-rootfs rw rootfstype=ext4 rootwait panic=0 loglevel=7 earlyprintk" ++CONFIG_PM_DEBUG=y ++CONFIG_PM_ADVANCED_DEBUG=y ++CONFIG_CPU_IDLE=y ++CONFIG_CPU_IDLE_GOV_LADDER=y ++CONFIG_CPU_IDLE_GOV_MENU=y ++CONFIG_CPU_IDLE_GOV_TEO=y ++CONFIG_CPU_FREQ=y ++CONFIG_VIRTUALIZATION=y ++CONFIG_KVM=m ++CONFIG_JUMP_LABEL=y ++CONFIG_ARCH_MMAP_RND_BITS=24 ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_MODULE_SRCVERSION_ALL=y ++# CONFIG_MODULE_SIG_ALL is not set ++CONFIG_MODULE_COMPRESS_ZSTD=y ++CONFIG_BLK_DEV_ZONED=y ++CONFIG_BLK_DEV_THROTTLING=y ++CONFIG_BLK_WBT=y ++CONFIG_BLK_CGROUP_FC_APPID=y ++CONFIG_BLK_CGROUP_IOCOST=y ++CONFIG_BLK_CGROUP_IOPRIO=y ++CONFIG_BLK_SED_OPAL=y ++CONFIG_BLK_INLINE_ENCRYPTION=y ++CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_AIX_PARTITION=y ++CONFIG_OSF_PARTITION=y ++CONFIG_AMIGA_PARTITION=y ++CONFIG_ATARI_PARTITION=y ++CONFIG_MAC_PARTITION=y ++CONFIG_BSD_DISKLABEL=y ++CONFIG_MINIX_SUBPARTITION=y ++CONFIG_SOLARIS_X86_PARTITION=y ++CONFIG_UNIXWARE_DISKLABEL=y ++CONFIG_LDM_PARTITION=y ++CONFIG_SGI_PARTITION=y ++CONFIG_ULTRIX_PARTITION=y ++CONFIG_SUN_PARTITION=y ++CONFIG_KARMA_PARTITION=y ++CONFIG_SYSV68_PARTITION=y ++CONFIG_CMDLINE_PARTITION=y ++CONFIG_MQ_IOSCHED_KYBER=m ++CONFIG_IOSCHED_BFQ=m ++CONFIG_BINFMT_MISC=m ++CONFIG_ZSWAP=y ++CONFIG_ZSMALLOC=y ++CONFIG_SLAB_FREELIST_RANDOM=y ++CONFIG_SLAB_FREELIST_HARDENED=y ++CONFIG_SHUFFLE_PAGE_ALLOCATOR=y ++# CONFIG_COMPAT_BRK is not set ++# CONFIG_SPARSEMEM_VMEMMAP is not set ++CONFIG_KSM=y ++CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 ++CONFIG_TRANSPARENT_HUGEPAGE=y ++CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y ++CONFIG_CMA=y ++CONFIG_CMA_SYSFS=y ++CONFIG_CMA_AREAS=7 ++CONFIG_IDLE_PAGE_TRACKING=y ++CONFIG_ANON_VMA_NAME=y ++CONFIG_USERFAULTFD=y ++CONFIG_LRU_GEN=y ++CONFIG_LRU_GEN_ENABLED=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_PACKET_DIAG=m ++CONFIG_UNIX_DIAG=m ++CONFIG_TLS=m ++CONFIG_TLS_DEVICE=y ++CONFIG_XFRM_USER=m ++CONFIG_XFRM_INTERFACE=m ++CONFIG_XFRM_STATISTICS=y ++CONFIG_NET_KEY=m ++CONFIG_SMC=m ++CONFIG_SMC_DIAG=m ++CONFIG_XDP_SOCKETS=y ++CONFIG_XDP_SOCKETS_DIAG=m ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_FIB_TRIE_STATS=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_MULTIPATH=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_IP_PNP_BOOTP=y ++CONFIG_IP_PNP_RARP=y ++CONFIG_NET_IPIP=m ++CONFIG_NET_IPGRE_DEMUX=m ++CONFIG_NET_IPGRE=m ++CONFIG_NET_IPGRE_BROADCAST=y ++CONFIG_IP_MROUTE=y ++CONFIG_IP_MROUTE_MULTIPLE_TABLES=y ++CONFIG_IP_PIMSM_V1=y 
++CONFIG_IP_PIMSM_V2=y ++CONFIG_NET_IPVTI=m ++CONFIG_NET_FOU_IP_TUNNELS=y ++CONFIG_INET_AH=m ++CONFIG_INET_ESP=m ++CONFIG_INET_ESP_OFFLOAD=m ++CONFIG_INET_ESPINTCP=y ++CONFIG_INET_IPCOMP=m ++CONFIG_INET_DIAG=m ++CONFIG_INET_UDP_DIAG=m ++CONFIG_INET_RAW_DIAG=m ++CONFIG_INET_DIAG_DESTROY=y ++CONFIG_TCP_CONG_ADVANCED=y ++CONFIG_TCP_CONG_HSTCP=m ++CONFIG_TCP_CONG_HYBLA=m ++CONFIG_TCP_CONG_NV=m ++CONFIG_TCP_CONG_SCALABLE=m ++CONFIG_TCP_CONG_LP=m ++CONFIG_TCP_CONG_VENO=m ++CONFIG_TCP_CONG_YEAH=m ++CONFIG_TCP_CONG_ILLINOIS=m ++CONFIG_TCP_CONG_DCTCP=m ++CONFIG_TCP_CONG_CDG=m ++CONFIG_TCP_CONG_BBR=m ++CONFIG_TCP_MD5SIG=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_INET6_AH=m ++CONFIG_INET6_ESP=m ++CONFIG_INET6_ESP_OFFLOAD=m ++CONFIG_INET6_ESPINTCP=y ++CONFIG_INET6_IPCOMP=m ++CONFIG_IPV6_MIP6=m ++CONFIG_IPV6_ILA=m ++CONFIG_IPV6_VTI=m ++CONFIG_IPV6_SIT=m ++CONFIG_IPV6_SIT_6RD=y ++CONFIG_IPV6_GRE=m ++CONFIG_IPV6_SUBTREES=y ++CONFIG_IPV6_MROUTE=y ++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y ++CONFIG_IPV6_PIMSM_V2=y ++CONFIG_IPV6_SEG6_LWTUNNEL=y ++CONFIG_IPV6_SEG6_HMAC=y ++CONFIG_IPV6_IOAM6_LWTUNNEL=y ++CONFIG_MPTCP=y ++CONFIG_NETWORK_PHY_TIMESTAMPING=y ++CONFIG_NETFILTER=y ++CONFIG_BRIDGE_NETFILTER=m ++CONFIG_NETFILTER_NETLINK_HOOK=m ++CONFIG_NF_CONNTRACK=m ++CONFIG_NF_CONNTRACK_SECMARK=y ++CONFIG_NF_CONNTRACK_ZONES=y ++CONFIG_NF_CONNTRACK_EVENTS=y ++CONFIG_NF_CONNTRACK_TIMEOUT=y ++CONFIG_NF_CONNTRACK_TIMESTAMP=y ++CONFIG_NF_CONNTRACK_AMANDA=m ++CONFIG_NF_CONNTRACK_FTP=m ++CONFIG_NF_CONNTRACK_H323=m ++CONFIG_NF_CONNTRACK_IRC=m ++CONFIG_NF_CONNTRACK_NETBIOS_NS=m ++CONFIG_NF_CONNTRACK_SNMP=m ++CONFIG_NF_CONNTRACK_PPTP=m ++CONFIG_NF_CONNTRACK_SANE=m ++CONFIG_NF_CONNTRACK_SIP=m ++CONFIG_NF_CONNTRACK_TFTP=m ++CONFIG_NF_CT_NETLINK=m ++CONFIG_NF_CT_NETLINK_TIMEOUT=m ++CONFIG_NF_CT_NETLINK_HELPER=m ++CONFIG_NETFILTER_NETLINK_GLUE_CT=y ++CONFIG_NF_TABLES=m ++CONFIG_NF_TABLES_INET=y ++CONFIG_NF_TABLES_NETDEV=y ++CONFIG_NFT_NUMGEN=m ++CONFIG_NFT_CT=m ++CONFIG_NFT_FLOW_OFFLOAD=m ++CONFIG_NFT_CONNLIMIT=m ++CONFIG_NFT_LOG=m ++CONFIG_NFT_LIMIT=m ++CONFIG_NFT_MASQ=m ++CONFIG_NFT_REDIR=m ++CONFIG_NFT_NAT=m ++CONFIG_NFT_TUNNEL=m ++CONFIG_NFT_QUEUE=m ++CONFIG_NFT_QUOTA=m ++CONFIG_NFT_REJECT=m ++CONFIG_NFT_COMPAT=m ++CONFIG_NFT_HASH=m ++CONFIG_NFT_FIB_INET=m ++CONFIG_NFT_XFRM=m ++CONFIG_NFT_SOCKET=m ++CONFIG_NFT_OSF=m ++CONFIG_NFT_TPROXY=m ++CONFIG_NFT_SYNPROXY=m ++CONFIG_NFT_DUP_NETDEV=m ++CONFIG_NFT_FWD_NETDEV=m ++CONFIG_NFT_FIB_NETDEV=m ++CONFIG_NFT_REJECT_NETDEV=m ++CONFIG_NF_FLOW_TABLE_INET=m ++CONFIG_NF_FLOW_TABLE=m ++CONFIG_NETFILTER_XT_SET=m ++CONFIG_NETFILTER_XT_TARGET_AUDIT=m ++CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m ++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m ++CONFIG_NETFILTER_XT_TARGET_CT=m ++CONFIG_NETFILTER_XT_TARGET_DSCP=m ++CONFIG_NETFILTER_XT_TARGET_HMARK=m ++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m ++CONFIG_NETFILTER_XT_TARGET_LED=m ++CONFIG_NETFILTER_XT_TARGET_LOG=m ++CONFIG_NETFILTER_XT_TARGET_MARK=m ++CONFIG_NETFILTER_XT_TARGET_NFLOG=m ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m ++CONFIG_NETFILTER_XT_TARGET_TEE=m ++CONFIG_NETFILTER_XT_TARGET_TPROXY=m ++CONFIG_NETFILTER_XT_TARGET_TRACE=m ++CONFIG_NETFILTER_XT_TARGET_SECMARK=m ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m ++CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m ++CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m ++CONFIG_NETFILTER_XT_MATCH_BPF=m ++CONFIG_NETFILTER_XT_MATCH_CGROUP=m ++CONFIG_NETFILTER_XT_MATCH_CLUSTER=m ++CONFIG_NETFILTER_XT_MATCH_COMMENT=m 
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m ++CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m ++CONFIG_NETFILTER_XT_MATCH_CPU=m ++CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m ++CONFIG_NETFILTER_XT_MATCH_DSCP=m ++CONFIG_NETFILTER_XT_MATCH_ESP=m ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m ++CONFIG_NETFILTER_XT_MATCH_HELPER=m ++CONFIG_NETFILTER_XT_MATCH_IPCOMP=m ++CONFIG_NETFILTER_XT_MATCH_IPRANGE=m ++CONFIG_NETFILTER_XT_MATCH_IPVS=m ++CONFIG_NETFILTER_XT_MATCH_LENGTH=m ++CONFIG_NETFILTER_XT_MATCH_LIMIT=m ++CONFIG_NETFILTER_XT_MATCH_MAC=m ++CONFIG_NETFILTER_XT_MATCH_MARK=m ++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m ++CONFIG_NETFILTER_XT_MATCH_NFACCT=m ++CONFIG_NETFILTER_XT_MATCH_OSF=m ++CONFIG_NETFILTER_XT_MATCH_OWNER=m ++CONFIG_NETFILTER_XT_MATCH_POLICY=m ++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m ++CONFIG_NETFILTER_XT_MATCH_QUOTA=m ++CONFIG_NETFILTER_XT_MATCH_RATEEST=m ++CONFIG_NETFILTER_XT_MATCH_REALM=m ++CONFIG_NETFILTER_XT_MATCH_RECENT=m ++CONFIG_NETFILTER_XT_MATCH_SOCKET=m ++CONFIG_NETFILTER_XT_MATCH_STATE=m ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m ++CONFIG_NETFILTER_XT_MATCH_STRING=m ++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m ++CONFIG_NETFILTER_XT_MATCH_TIME=m ++CONFIG_NETFILTER_XT_MATCH_U32=m ++CONFIG_IP_SET=m ++CONFIG_IP_SET_BITMAP_IP=m ++CONFIG_IP_SET_BITMAP_IPMAC=m ++CONFIG_IP_SET_BITMAP_PORT=m ++CONFIG_IP_SET_HASH_IP=m ++CONFIG_IP_SET_HASH_IPMARK=m ++CONFIG_IP_SET_HASH_IPPORT=m ++CONFIG_IP_SET_HASH_IPPORTIP=m ++CONFIG_IP_SET_HASH_IPPORTNET=m ++CONFIG_IP_SET_HASH_IPMAC=m ++CONFIG_IP_SET_HASH_MAC=m ++CONFIG_IP_SET_HASH_NETPORTNET=m ++CONFIG_IP_SET_HASH_NET=m ++CONFIG_IP_SET_HASH_NETNET=m ++CONFIG_IP_SET_HASH_NETPORT=m ++CONFIG_IP_SET_HASH_NETIFACE=m ++CONFIG_IP_SET_LIST_SET=m ++CONFIG_IP_VS=m ++CONFIG_IP_VS_IPV6=y ++CONFIG_IP_VS_PROTO_TCP=y ++CONFIG_IP_VS_PROTO_UDP=y ++CONFIG_IP_VS_PROTO_ESP=y ++CONFIG_IP_VS_PROTO_AH=y ++CONFIG_IP_VS_PROTO_SCTP=y ++CONFIG_IP_VS_RR=m ++CONFIG_IP_VS_WRR=m ++CONFIG_IP_VS_LC=m ++CONFIG_IP_VS_WLC=m ++CONFIG_IP_VS_FO=m ++CONFIG_IP_VS_OVF=m ++CONFIG_IP_VS_LBLC=m ++CONFIG_IP_VS_LBLCR=m ++CONFIG_IP_VS_DH=m ++CONFIG_IP_VS_SH=m ++CONFIG_IP_VS_MH=m ++CONFIG_IP_VS_SED=m ++CONFIG_IP_VS_NQ=m ++CONFIG_IP_VS_TWOS=m ++CONFIG_IP_VS_FTP=m ++CONFIG_IP_VS_PE_SIP=m ++CONFIG_NFT_DUP_IPV4=m ++CONFIG_NFT_FIB_IPV4=m ++CONFIG_NF_TABLES_ARP=y ++CONFIG_NF_LOG_ARP=m ++CONFIG_NF_LOG_IPV4=m ++CONFIG_IP_NF_IPTABLES=m ++CONFIG_IP_NF_MATCH_AH=m ++CONFIG_IP_NF_MATCH_ECN=m ++CONFIG_IP_NF_MATCH_RPFILTER=m ++CONFIG_IP_NF_MATCH_TTL=m ++CONFIG_IP_NF_FILTER=m ++CONFIG_IP_NF_TARGET_REJECT=m ++CONFIG_IP_NF_TARGET_SYNPROXY=m ++CONFIG_IP_NF_NAT=m ++CONFIG_IP_NF_TARGET_MASQUERADE=m ++CONFIG_IP_NF_TARGET_NETMAP=m ++CONFIG_IP_NF_TARGET_REDIRECT=m ++CONFIG_IP_NF_MANGLE=m ++CONFIG_IP_NF_TARGET_ECN=m ++CONFIG_IP_NF_TARGET_TTL=m ++CONFIG_IP_NF_RAW=m ++CONFIG_IP_NF_SECURITY=m ++CONFIG_IP_NF_ARPTABLES=m ++CONFIG_IP_NF_ARPFILTER=m ++CONFIG_IP_NF_ARP_MANGLE=m ++CONFIG_NFT_DUP_IPV6=m ++CONFIG_NFT_FIB_IPV6=m ++CONFIG_IP6_NF_IPTABLES=m ++CONFIG_IP6_NF_MATCH_AH=m ++CONFIG_IP6_NF_MATCH_EUI64=m ++CONFIG_IP6_NF_MATCH_FRAG=m ++CONFIG_IP6_NF_MATCH_OPTS=m ++CONFIG_IP6_NF_MATCH_HL=m ++CONFIG_IP6_NF_MATCH_IPV6HEADER=m ++CONFIG_IP6_NF_MATCH_MH=m ++CONFIG_IP6_NF_MATCH_RPFILTER=m ++CONFIG_IP6_NF_MATCH_RT=m ++CONFIG_IP6_NF_MATCH_SRH=m ++CONFIG_IP6_NF_TARGET_HL=m ++CONFIG_IP6_NF_FILTER=m ++CONFIG_IP6_NF_TARGET_REJECT=m ++CONFIG_IP6_NF_TARGET_SYNPROXY=m ++CONFIG_IP6_NF_MANGLE=m ++CONFIG_IP6_NF_RAW=m 
++CONFIG_IP6_NF_SECURITY=m ++CONFIG_IP6_NF_NAT=m ++CONFIG_IP6_NF_TARGET_MASQUERADE=m ++CONFIG_IP6_NF_TARGET_NPT=m ++CONFIG_NF_TABLES_BRIDGE=m ++CONFIG_NFT_BRIDGE_META=m ++CONFIG_NFT_BRIDGE_REJECT=m ++CONFIG_NF_CONNTRACK_BRIDGE=m ++CONFIG_BRIDGE_NF_EBTABLES=m ++CONFIG_BRIDGE_EBT_BROUTE=m ++CONFIG_BRIDGE_EBT_T_FILTER=m ++CONFIG_BRIDGE_EBT_T_NAT=m ++CONFIG_BRIDGE_EBT_802_3=m ++CONFIG_BRIDGE_EBT_AMONG=m ++CONFIG_BRIDGE_EBT_ARP=m ++CONFIG_BRIDGE_EBT_IP=m ++CONFIG_BRIDGE_EBT_IP6=m ++CONFIG_BRIDGE_EBT_LIMIT=m ++CONFIG_BRIDGE_EBT_MARK=m ++CONFIG_BRIDGE_EBT_PKTTYPE=m ++CONFIG_BRIDGE_EBT_STP=m ++CONFIG_BRIDGE_EBT_VLAN=m ++CONFIG_BRIDGE_EBT_ARPREPLY=m ++CONFIG_BRIDGE_EBT_DNAT=m ++CONFIG_BRIDGE_EBT_MARK_T=m ++CONFIG_BRIDGE_EBT_REDIRECT=m ++CONFIG_BRIDGE_EBT_SNAT=m ++CONFIG_BRIDGE_EBT_LOG=m ++CONFIG_BRIDGE_EBT_NFLOG=m ++CONFIG_IP_DCCP=m ++# CONFIG_IP_DCCP_CCID3 is not set ++CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y ++CONFIG_SCTP_COOKIE_HMAC_MD5=y ++CONFIG_RDS=m ++CONFIG_RDS_RDMA=m ++CONFIG_RDS_TCP=m ++CONFIG_TIPC=m ++CONFIG_TIPC_MEDIA_IB=y ++CONFIG_ATM=m ++CONFIG_ATM_CLIP=m ++CONFIG_ATM_LANE=m ++CONFIG_ATM_MPOA=m ++CONFIG_ATM_BR2684=m ++CONFIG_L2TP=m ++CONFIG_L2TP_DEBUGFS=m ++CONFIG_L2TP_V3=y ++CONFIG_L2TP_IP=m ++CONFIG_L2TP_ETH=m ++CONFIG_BRIDGE=m ++CONFIG_BRIDGE_VLAN_FILTERING=y ++CONFIG_BRIDGE_MRP=y ++CONFIG_BRIDGE_CFM=y ++CONFIG_NET_DSA=m ++CONFIG_NET_DSA_TAG_RZN1_A5PSW=m ++CONFIG_VLAN_8021Q=m ++CONFIG_VLAN_8021Q_GVRP=y ++CONFIG_VLAN_8021Q_MVRP=y ++CONFIG_LLC2=m ++CONFIG_ATALK=m ++CONFIG_X25=m ++CONFIG_LAPB=m ++CONFIG_PHONET=m ++CONFIG_6LOWPAN=m ++CONFIG_IEEE802154=m ++CONFIG_IEEE802154_6LOWPAN=m ++CONFIG_MAC802154=m ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_HTB=m ++CONFIG_NET_SCH_HFSC=m ++CONFIG_NET_SCH_PRIO=m ++CONFIG_NET_SCH_MULTIQ=m ++CONFIG_NET_SCH_RED=m ++CONFIG_NET_SCH_SFB=m ++CONFIG_NET_SCH_SFQ=m ++CONFIG_NET_SCH_TEQL=m ++CONFIG_NET_SCH_TBF=m ++CONFIG_NET_SCH_CBS=m ++CONFIG_NET_SCH_ETF=m ++CONFIG_NET_SCH_TAPRIO=m ++CONFIG_NET_SCH_GRED=m ++CONFIG_NET_SCH_NETEM=m ++CONFIG_NET_SCH_DRR=m ++CONFIG_NET_SCH_MQPRIO=m ++CONFIG_NET_SCH_SKBPRIO=m ++CONFIG_NET_SCH_CHOKE=m ++CONFIG_NET_SCH_QFQ=m ++CONFIG_NET_SCH_CODEL=m ++CONFIG_NET_SCH_FQ_CODEL=m ++CONFIG_NET_SCH_CAKE=m ++CONFIG_NET_SCH_FQ=m ++CONFIG_NET_SCH_HHF=m ++CONFIG_NET_SCH_PIE=m ++CONFIG_NET_SCH_FQ_PIE=m ++CONFIG_NET_SCH_INGRESS=m ++CONFIG_NET_SCH_PLUG=m ++CONFIG_NET_SCH_ETS=m ++CONFIG_NET_CLS_BASIC=m ++CONFIG_NET_CLS_ROUTE4=m ++CONFIG_NET_CLS_FW=m ++CONFIG_NET_CLS_U32=m ++CONFIG_CLS_U32_MARK=y ++CONFIG_NET_CLS_FLOW=m ++CONFIG_NET_CLS_CGROUP=m ++CONFIG_NET_CLS_BPF=m ++CONFIG_NET_CLS_FLOWER=m ++CONFIG_NET_CLS_MATCHALL=m ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_CMP=m ++CONFIG_NET_EMATCH_NBYTE=m ++CONFIG_NET_EMATCH_U32=m ++CONFIG_NET_EMATCH_META=m ++CONFIG_NET_EMATCH_TEXT=m ++CONFIG_NET_EMATCH_CANID=m ++CONFIG_NET_EMATCH_IPSET=m ++CONFIG_NET_EMATCH_IPT=m ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_POLICE=m ++CONFIG_NET_ACT_GACT=m ++CONFIG_GACT_PROB=y ++CONFIG_NET_ACT_MIRRED=m ++CONFIG_NET_ACT_SAMPLE=m ++CONFIG_NET_ACT_IPT=m ++CONFIG_NET_ACT_NAT=m ++CONFIG_NET_ACT_PEDIT=m ++CONFIG_NET_ACT_SIMP=m ++CONFIG_NET_ACT_SKBEDIT=m ++CONFIG_NET_ACT_CSUM=m ++CONFIG_NET_ACT_MPLS=m ++CONFIG_NET_ACT_VLAN=m ++CONFIG_NET_ACT_BPF=m ++CONFIG_NET_ACT_CONNMARK=m ++CONFIG_NET_ACT_CTINFO=m ++CONFIG_NET_ACT_SKBMOD=m ++CONFIG_NET_ACT_TUNNEL_KEY=m ++CONFIG_NET_ACT_CT=m ++CONFIG_NET_ACT_GATE=m ++CONFIG_NET_TC_SKB_EXT=y ++CONFIG_DCB=y ++CONFIG_DNS_RESOLVER=y ++CONFIG_BATMAN_ADV=m ++# CONFIG_BATMAN_ADV_BATMAN_V is not set ++CONFIG_BATMAN_ADV_NC=y ++CONFIG_OPENVSWITCH=m 
++CONFIG_VSOCKETS=m ++CONFIG_VIRTIO_VSOCKETS=m ++CONFIG_NETLINK_DIAG=m ++CONFIG_MPLS_ROUTING=m ++CONFIG_MPLS_IPTUNNEL=m ++CONFIG_HSR=m ++CONFIG_QRTR_SMD=m ++CONFIG_QRTR_TUN=m ++CONFIG_NET_NCSI=y ++CONFIG_NCSI_OEM_CMD_GET_MAC=y ++CONFIG_CGROUP_NET_PRIO=y ++CONFIG_BPF_STREAM_PARSER=y ++CONFIG_NET_PKTGEN=m ++CONFIG_NET_DROP_MONITOR=y ++CONFIG_HAMRADIO=y ++CONFIG_AX25=m ++CONFIG_NETROM=m ++CONFIG_ROSE=m ++CONFIG_MKISS=m ++CONFIG_6PACK=m ++CONFIG_BPQETHER=m ++CONFIG_BAYCOM_SER_FDX=m ++CONFIG_BAYCOM_SER_HDX=m ++CONFIG_BAYCOM_PAR=m ++CONFIG_YAM=m ++CONFIG_CAN=m ++CONFIG_CAN_J1939=m ++CONFIG_CAN_ISOTP=m ++CONFIG_BT=m ++CONFIG_BT_RFCOMM=m ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=m ++CONFIG_BT_BNEP_MC_FILTER=y ++CONFIG_BT_BNEP_PROTO_FILTER=y ++CONFIG_BT_CMTP=m ++CONFIG_BT_HIDP=m ++CONFIG_BT_6LOWPAN=m ++CONFIG_BT_LEDS=y ++CONFIG_BT_MSFTEXT=y ++CONFIG_BT_AOSPEXT=y ++CONFIG_BT_HCIBTUSB=m ++CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y ++CONFIG_BT_HCIBTUSB_MTK=y ++CONFIG_BT_HCIBTSDIO=m ++CONFIG_BT_HCIUART=m ++CONFIG_BT_HCIUART_NOKIA=m ++CONFIG_BT_HCIUART_BCSP=y ++CONFIG_BT_HCIUART_ATH3K=y ++CONFIG_BT_HCIUART_LL=y ++CONFIG_BT_HCIUART_INTEL=y ++CONFIG_BT_HCIUART_BCM=y ++CONFIG_BT_HCIUART_RTL=y ++CONFIG_BT_HCIUART_QCA=y ++CONFIG_BT_HCIUART_AG6XX=y ++CONFIG_BT_HCIUART_MRVL=y ++CONFIG_BT_HCIBCM203X=m ++CONFIG_BT_HCIBPA10X=m ++CONFIG_BT_HCIBFUSB=m ++CONFIG_BT_HCIVHCI=m ++CONFIG_BT_MRVL=m ++CONFIG_BT_MRVL_SDIO=m ++CONFIG_BT_ATH3K=m ++CONFIG_BT_MTKSDIO=m ++CONFIG_BT_MTKUART=m ++CONFIG_BT_VIRTIO=m ++CONFIG_AF_RXRPC_IPV6=y ++CONFIG_RXKAD=y ++CONFIG_AF_KCM=m ++CONFIG_MCTP=y ++CONFIG_CFG80211_DEBUGFS=y ++CONFIG_MAC80211=m ++CONFIG_MAC80211_MESH=y ++CONFIG_MAC80211_MESSAGE_TRACING=y ++CONFIG_RFKILL=y ++CONFIG_RFKILL_INPUT=y ++CONFIG_RFKILL_GPIO=m ++CONFIG_NET_9P=m ++CONFIG_NET_9P_VIRTIO=m ++CONFIG_NET_9P_RDMA=m ++CONFIG_CAIF=m ++CONFIG_CAIF_USB=m ++CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y ++CONFIG_NFC=m ++CONFIG_NFC_DIGITAL=m ++CONFIG_NFC_NCI=m ++CONFIG_NFC_NCI_SPI=m ++CONFIG_NFC_NCI_UART=m ++CONFIG_NFC_HCI=m ++CONFIG_NFC_SHDLC=y ++CONFIG_NFC_TRF7970A=m ++CONFIG_NFC_SIM=m ++CONFIG_NFC_PORT100=m ++CONFIG_NFC_VIRTUAL_NCI=m ++CONFIG_NFC_FDP=m ++CONFIG_NFC_FDP_I2C=m ++CONFIG_NFC_PN544_I2C=m ++CONFIG_NFC_PN533_USB=m ++CONFIG_NFC_PN533_I2C=m ++CONFIG_NFC_PN532_UART=m ++CONFIG_NFC_MICROREAD_I2C=m ++CONFIG_NFC_MRVL_USB=m ++CONFIG_NFC_MRVL_UART=m ++CONFIG_NFC_MRVL_I2C=m ++CONFIG_NFC_MRVL_SPI=m ++CONFIG_NFC_ST21NFCA_I2C=m ++CONFIG_NFC_ST_NCI_I2C=m ++CONFIG_NFC_ST_NCI_SPI=m ++CONFIG_NFC_NXP_NCI=m ++CONFIG_NFC_NXP_NCI_I2C=m ++CONFIG_NFC_S3FWRN5_I2C=m ++CONFIG_NFC_S3FWRN82_UART=m ++CONFIG_NFC_ST95HF=m ++CONFIG_NET_IFE=m ++CONFIG_PAGE_POOL_STATS=y ++CONFIG_PCI=y ++CONFIG_PCIEAER=y ++CONFIG_PCIE_DPC=y ++CONFIG_PCIE_PTM=y ++CONFIG_PCI_REALLOC_ENABLE_AUTO=y ++CONFIG_PCI_STUB=m ++CONFIG_PCI_PF_STUB=m ++CONFIG_PCI_IOV=y ++CONFIG_PCI_PRI=y ++CONFIG_PCI_PASID=y ++CONFIG_HOTPLUG_PCI_CPCI=y ++CONFIG_HOTPLUG_PCI_SHPC=y ++CONFIG_PCI_FTPCI100=y ++CONFIG_PCI_HOST_GENERIC=y ++CONFIG_PCIE_MICROCHIP_HOST=y ++CONFIG_PCIE_XILINX=y ++CONFIG_PCIE_CADENCE_PLAT_HOST=y ++CONFIG_PCIE_CADENCE_PLAT_EP=y ++CONFIG_PCIE_DW_PLAT_HOST=y ++CONFIG_PCIE_DW_PLAT_EP=y ++CONFIG_PCIE_FU740=y ++CONFIG_PCIE_ULTRARISC=y ++CONFIG_PCI_ENDPOINT=y ++CONFIG_PCI_ENDPOINT_CONFIGFS=y ++CONFIG_PCI_EPF_NTB=m ++CONFIG_PCI_EPF_VNTB=m ++CONFIG_CXL_BUS=m ++CONFIG_RAPIDIO=y ++CONFIG_RAPIDIO_TSI721=m ++CONFIG_RAPIDIO_DMA_ENGINE=y ++CONFIG_RAPIDIO_ENUM_BASIC=m ++CONFIG_RAPIDIO_CHMAN=m ++CONFIG_RAPIDIO_MPORT_CDEV=m ++CONFIG_RAPIDIO_CPS_XX=m ++CONFIG_RAPIDIO_CPS_GEN2=m ++CONFIG_RAPIDIO_RXS_GEN3=m 
++CONFIG_UEVENT_HELPER=y ++CONFIG_DEVTMPFS=y ++CONFIG_DEVTMPFS_MOUNT=y ++CONFIG_DEVTMPFS_SAFE=y ++CONFIG_FW_LOADER_COMPRESS=y ++CONFIG_FW_LOADER_COMPRESS_ZSTD=y ++CONFIG_MOXTET=m ++CONFIG_MHI_BUS_PCI_GENERIC=m ++CONFIG_MHI_BUS_EP=m ++CONFIG_CONNECTOR=y ++CONFIG_FIRMWARE_MEMMAP=y ++CONFIG_SYSFB_SIMPLEFB=y ++CONFIG_EFI_VARS_PSTORE=m ++CONFIG_EFI_BOOTLOADER_CONTROL=m ++CONFIG_EFI_CAPSULE_LOADER=m ++CONFIG_EFI_TEST=m ++CONFIG_RESET_ATTACK_MITIGATION=y ++CONFIG_EFI_COCO_SECRET=y ++CONFIG_GNSS=m ++CONFIG_GNSS_MTK_SERIAL=m ++CONFIG_GNSS_SIRF_SERIAL=m ++CONFIG_GNSS_UBX_SERIAL=m ++CONFIG_GNSS_USB=m ++CONFIG_MTD=m ++CONFIG_MTD_CMDLINE_PARTS=m ++CONFIG_MTD_REDBOOT_PARTS=m ++CONFIG_MTD_BLOCK=m ++CONFIG_MTD_BLOCK_RO=m ++CONFIG_FTL=m ++CONFIG_NFTL=m ++CONFIG_NFTL_RW=y ++CONFIG_INFTL=m ++CONFIG_RFD_FTL=m ++CONFIG_SSFDC=m ++CONFIG_SM_FTL=m ++CONFIG_MTD_OOPS=m ++CONFIG_MTD_PSTORE=m ++CONFIG_MTD_SWAP=m ++CONFIG_MTD_JEDECPROBE=m ++CONFIG_MTD_CFI_INTELEXT=m ++CONFIG_MTD_CFI_STAA=m ++CONFIG_MTD_ROM=m ++CONFIG_MTD_ABSENT=m ++CONFIG_MTD_PHYSMAP=m ++CONFIG_MTD_PHYSMAP_OF=y ++CONFIG_MTD_PHYSMAP_GPIO_ADDR=y ++CONFIG_MTD_PCI=m ++CONFIG_MTD_INTEL_VR_NOR=m ++CONFIG_MTD_PLATRAM=m ++CONFIG_MTD_PMC551=m ++CONFIG_MTD_DATAFLASH=m ++CONFIG_MTD_DATAFLASH_OTP=y ++CONFIG_MTD_MCHP23K256=m ++CONFIG_MTD_MCHP48L640=m ++CONFIG_MTD_SST25L=m ++CONFIG_MTD_SLRAM=m ++CONFIG_MTD_PHRAM=m ++CONFIG_MTD_MTDRAM=m ++CONFIG_MTD_BLOCK2MTD=m ++CONFIG_MTD_ONENAND=m ++CONFIG_MTD_ONENAND_VERIFY_WRITE=y ++CONFIG_MTD_ONENAND_GENERIC=m ++CONFIG_MTD_ONENAND_2X_PROGRAM=y ++CONFIG_MTD_RAW_NAND=m ++CONFIG_MTD_NAND_DENALI_PCI=m ++CONFIG_MTD_NAND_DENALI_DT=m ++CONFIG_MTD_NAND_CAFE=m ++CONFIG_MTD_NAND_MXIC=m ++CONFIG_MTD_NAND_GPIO=m ++CONFIG_MTD_NAND_PLATFORM=m ++CONFIG_MTD_NAND_CADENCE=m ++CONFIG_MTD_NAND_ARASAN=m ++CONFIG_MTD_NAND_INTEL_LGM=m ++CONFIG_MTD_NAND_NANDSIM=m ++CONFIG_MTD_NAND_RICOH=m ++CONFIG_MTD_NAND_DISKONCHIP=m ++CONFIG_MTD_SPI_NAND=m ++CONFIG_MTD_NAND_ECC_SW_BCH=y ++CONFIG_MTD_LPDDR=m ++CONFIG_MTD_SPI_NOR=m ++CONFIG_MTD_UBI=m ++CONFIG_MTD_UBI_FASTMAP=y ++CONFIG_MTD_UBI_GLUEBI=m ++CONFIG_MTD_UBI_BLOCK=y ++CONFIG_MTD_HYPERBUS=m ++CONFIG_OF_OVERLAY=y ++CONFIG_PARPORT=m ++CONFIG_PARPORT_PC=m ++CONFIG_PARPORT_SERIAL=m ++CONFIG_PARPORT_PC_FIFO=y ++CONFIG_PARPORT_1284=y ++CONFIG_BLK_DEV_NULL_BLK=m ++CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m ++CONFIG_ZRAM=m ++CONFIG_ZRAM_WRITEBACK=y ++CONFIG_ZRAM_MEMORY_TRACKING=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_DRBD=m ++CONFIG_BLK_DEV_NBD=m ++CONFIG_BLK_DEV_RAM=m ++CONFIG_BLK_DEV_RAM_SIZE=65536 ++CONFIG_ATA_OVER_ETH=m ++CONFIG_VIRTIO_BLK=y ++CONFIG_BLK_DEV_RBD=m ++CONFIG_BLK_DEV_UBLK=m ++CONFIG_BLK_DEV_RNBD_CLIENT=m ++CONFIG_BLK_DEV_RNBD_SERVER=m ++CONFIG_BLK_DEV_NVME=y ++CONFIG_NVME_MULTIPATH=y ++CONFIG_NVME_HWMON=y ++CONFIG_NVME_RDMA=m ++CONFIG_NVME_FC=m ++CONFIG_NVME_TCP=m ++CONFIG_NVME_TARGET=m ++CONFIG_NVME_TARGET_PASSTHRU=y ++CONFIG_NVME_TARGET_LOOP=m ++CONFIG_NVME_TARGET_RDMA=m ++CONFIG_NVME_TARGET_FC=m ++CONFIG_NVME_TARGET_TCP=m ++CONFIG_NVME_TARGET_AUTH=y ++CONFIG_AD525X_DPOT=m ++CONFIG_AD525X_DPOT_I2C=m ++CONFIG_AD525X_DPOT_SPI=m ++CONFIG_DUMMY_IRQ=m ++CONFIG_PHANTOM=m ++CONFIG_ICS932S401=m ++CONFIG_ENCLOSURE_SERVICES=m ++CONFIG_HI6421V600_IRQ=m ++CONFIG_HP_ILO=m ++CONFIG_APDS9802ALS=m ++CONFIG_ISL29003=m ++CONFIG_ISL29020=m ++CONFIG_SENSORS_TSL2550=m ++CONFIG_SENSORS_BH1770=m ++CONFIG_SENSORS_APDS990X=m ++CONFIG_HMC6352=m ++CONFIG_DS1682=m ++CONFIG_LATTICE_ECP3_CONFIG=m ++CONFIG_SRAM=y ++CONFIG_DW_XDATA_PCIE=m ++CONFIG_XILINX_SDFEC=m ++CONFIG_OPEN_DICE=m ++CONFIG_VCPU_STALL_DETECTOR=m ++CONFIG_C2PORT=m 
++CONFIG_EEPROM_AT24=m ++CONFIG_EEPROM_AT25=m ++CONFIG_EEPROM_MAX6875=m ++CONFIG_EEPROM_93XX46=m ++CONFIG_EEPROM_IDT_89HPESX=m ++CONFIG_EEPROM_EE1004=m ++CONFIG_TI_ST=m ++CONFIG_SENSORS_LIS3_I2C=m ++CONFIG_GENWQE=m ++CONFIG_ECHO=m ++CONFIG_BCM_VK=m ++CONFIG_BCM_VK_TTY=y ++CONFIG_MISC_ALCOR_PCI=m ++CONFIG_MISC_RTSX_PCI=m ++CONFIG_MISC_RTSX_USB=m ++CONFIG_UACCE=m ++CONFIG_PVPANIC=y ++CONFIG_PVPANIC_MMIO=m ++CONFIG_PVPANIC_PCI=m ++CONFIG_GP_PCI1XXXX=m ++CONFIG_BLK_DEV_SD=y ++CONFIG_CHR_DEV_ST=m ++CONFIG_BLK_DEV_SR=y ++CONFIG_CHR_DEV_SG=y ++CONFIG_CHR_DEV_SCH=m ++CONFIG_SCSI_ENCLOSURE=m ++CONFIG_SCSI_CONSTANTS=y ++CONFIG_SCSI_LOGGING=y ++CONFIG_SCSI_SCAN_ASYNC=y ++CONFIG_SCSI_FC_ATTRS=m ++CONFIG_SCSI_SAS_ATA=y ++CONFIG_ISCSI_TCP=m ++CONFIG_SCSI_CXGB3_ISCSI=m ++CONFIG_SCSI_CXGB4_ISCSI=m ++CONFIG_SCSI_BNX2_ISCSI=m ++CONFIG_SCSI_BNX2X_FCOE=m ++CONFIG_BE2ISCSI=m ++CONFIG_BLK_DEV_3W_XXXX_RAID=m ++CONFIG_SCSI_HPSA=m ++CONFIG_SCSI_3W_9XXX=m ++CONFIG_SCSI_3W_SAS=m ++CONFIG_SCSI_ACARD=m ++CONFIG_SCSI_AACRAID=m ++CONFIG_SCSI_AIC7XXX=m ++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8 ++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set ++CONFIG_SCSI_AIC79XX=m ++# CONFIG_AIC79XX_DEBUG_ENABLE is not set ++CONFIG_SCSI_AIC94XX=m ++# CONFIG_AIC94XX_DEBUG is not set ++CONFIG_SCSI_MVSAS=m ++# CONFIG_SCSI_MVSAS_DEBUG is not set ++CONFIG_SCSI_MVUMI=m ++CONFIG_SCSI_ADVANSYS=m ++CONFIG_SCSI_ARCMSR=m ++CONFIG_SCSI_ESAS2R=m ++CONFIG_MEGARAID_NEWGEN=y ++CONFIG_MEGARAID_MM=m ++CONFIG_MEGARAID_MAILBOX=m ++CONFIG_MEGARAID_LEGACY=m ++CONFIG_MEGARAID_SAS=m ++CONFIG_SCSI_MPT2SAS=m ++CONFIG_SCSI_MPI3MR=m ++CONFIG_SCSI_SMARTPQI=m ++CONFIG_SCSI_HPTIOP=m ++CONFIG_SCSI_BUSLOGIC=m ++CONFIG_SCSI_FLASHPOINT=y ++CONFIG_SCSI_MYRB=m ++CONFIG_SCSI_MYRS=m ++CONFIG_LIBFC=m ++CONFIG_LIBFCOE=m ++CONFIG_FCOE=m ++CONFIG_SCSI_SNIC=m ++CONFIG_SCSI_DMX3191D=m ++CONFIG_SCSI_FDOMAIN_PCI=m ++CONFIG_SCSI_IPS=m ++CONFIG_SCSI_INITIO=m ++CONFIG_SCSI_INIA100=m ++CONFIG_SCSI_PPA=m ++CONFIG_SCSI_IMM=m ++CONFIG_SCSI_STEX=m ++CONFIG_SCSI_SYM53C8XX_2=m ++CONFIG_SCSI_IPR=m ++CONFIG_SCSI_QLOGIC_1280=m ++CONFIG_SCSI_QLA_FC=m ++CONFIG_TCM_QLA2XXX=m ++CONFIG_SCSI_QLA_ISCSI=m ++CONFIG_QEDI=m ++CONFIG_QEDF=m ++CONFIG_SCSI_EFCT=m ++CONFIG_SCSI_DC395x=m ++CONFIG_SCSI_AM53C974=m ++CONFIG_SCSI_WD719X=m ++CONFIG_SCSI_DEBUG=m ++CONFIG_SCSI_PMCRAID=m ++CONFIG_SCSI_PM8001=m ++CONFIG_SCSI_BFA_FC=m ++CONFIG_SCSI_VIRTIO=y ++CONFIG_SCSI_CHELSIO_FCOE=m ++CONFIG_SCSI_DH=y ++CONFIG_SCSI_DH_RDAC=m ++CONFIG_SCSI_DH_HP_SW=m ++CONFIG_SCSI_DH_EMC=m ++CONFIG_SCSI_DH_ALUA=m ++CONFIG_ATA=y ++CONFIG_SATA_AHCI=y ++CONFIG_SATA_MOBILE_LPM_POLICY=3 ++CONFIG_SATA_AHCI_PLATFORM=y ++CONFIG_AHCI_DWC=m ++CONFIG_AHCI_CEVA=m ++CONFIG_SATA_INIC162X=m ++CONFIG_SATA_ACARD_AHCI=m ++CONFIG_SATA_SIL24=m ++CONFIG_PDC_ADMA=m ++CONFIG_SATA_QSTOR=m ++CONFIG_SATA_SX4=m ++CONFIG_ATA_PIIX=m ++CONFIG_SATA_DWC=m ++CONFIG_SATA_DWC_OLD_DMA=y ++CONFIG_SATA_MV=m ++CONFIG_SATA_NV=m ++CONFIG_SATA_PROMISE=m ++CONFIG_SATA_SIL=m ++CONFIG_SATA_SIS=m ++CONFIG_SATA_SVW=m ++CONFIG_SATA_ULI=m ++CONFIG_SATA_VIA=m ++CONFIG_SATA_VITESSE=m ++CONFIG_PATA_ALI=m ++CONFIG_PATA_AMD=m ++CONFIG_PATA_ARTOP=m ++CONFIG_PATA_ATIIXP=m ++CONFIG_PATA_ATP867X=m ++CONFIG_PATA_CMD64X=m ++CONFIG_PATA_CYPRESS=m ++CONFIG_PATA_EFAR=m ++CONFIG_PATA_HPT366=m ++CONFIG_PATA_HPT37X=m ++CONFIG_PATA_HPT3X2N=m ++CONFIG_PATA_HPT3X3=m ++CONFIG_PATA_IT8213=m ++CONFIG_PATA_IT821X=m ++CONFIG_PATA_JMICRON=m ++CONFIG_PATA_MARVELL=m ++CONFIG_PATA_NETCELL=m ++CONFIG_PATA_NINJA32=m ++CONFIG_PATA_NS87415=m ++CONFIG_PATA_OLDPIIX=m ++CONFIG_PATA_OPTIDMA=m ++CONFIG_PATA_PDC2027X=m 
++CONFIG_PATA_PDC_OLD=m ++CONFIG_PATA_RADISYS=m ++CONFIG_PATA_RDC=m ++CONFIG_PATA_SCH=m ++CONFIG_PATA_SERVERWORKS=m ++CONFIG_PATA_SIL680=m ++CONFIG_PATA_TOSHIBA=m ++CONFIG_PATA_TRIFLEX=m ++CONFIG_PATA_VIA=m ++CONFIG_PATA_WINBOND=m ++CONFIG_PATA_CMD640_PCI=m ++CONFIG_PATA_MPIIX=m ++CONFIG_PATA_NS87410=m ++CONFIG_PATA_OPTI=m ++CONFIG_PATA_OF_PLATFORM=m ++CONFIG_PATA_RZ1000=m ++CONFIG_ATA_GENERIC=m ++CONFIG_PATA_LEGACY=m ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++CONFIG_MD_CLUSTER=m ++CONFIG_BCACHE=m ++CONFIG_BCACHE_ASYNC_REGISTRATION=y ++CONFIG_BLK_DEV_DM=y ++CONFIG_DM_UNSTRIPED=m ++CONFIG_DM_CRYPT=m ++CONFIG_DM_SNAPSHOT=m ++CONFIG_DM_THIN_PROVISIONING=m ++CONFIG_DM_CACHE=m ++CONFIG_DM_WRITECACHE=m ++CONFIG_DM_ERA=m ++CONFIG_DM_CLONE=m ++CONFIG_DM_MIRROR=m ++CONFIG_DM_LOG_USERSPACE=m ++CONFIG_DM_RAID=m ++CONFIG_DM_ZERO=m ++CONFIG_DM_MULTIPATH=m ++CONFIG_DM_MULTIPATH_QL=m ++CONFIG_DM_MULTIPATH_ST=m ++CONFIG_DM_MULTIPATH_HST=m ++CONFIG_DM_MULTIPATH_IOA=m ++CONFIG_DM_DELAY=m ++CONFIG_DM_INIT=y ++CONFIG_DM_UEVENT=y ++CONFIG_DM_FLAKEY=m ++CONFIG_DM_VERITY=m ++CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y ++CONFIG_DM_SWITCH=m ++CONFIG_DM_LOG_WRITES=m ++CONFIG_DM_INTEGRITY=m ++CONFIG_DM_ZONED=m ++CONFIG_TARGET_CORE=m ++CONFIG_TCM_IBLOCK=m ++CONFIG_TCM_FILEIO=m ++CONFIG_TCM_PSCSI=m ++CONFIG_TCM_USER2=m ++CONFIG_LOOPBACK_TARGET=m ++CONFIG_TCM_FC=m ++CONFIG_ISCSI_TARGET=m ++CONFIG_ISCSI_TARGET_CXGB4=m ++CONFIG_SBP_TARGET=m ++CONFIG_FUSION=y ++CONFIG_FUSION_SPI=m ++CONFIG_FUSION_FC=m ++CONFIG_FUSION_SAS=m ++CONFIG_FUSION_CTL=m ++CONFIG_FUSION_LAN=m ++CONFIG_FUSION_LOGGING=y ++CONFIG_FIREWIRE=m ++CONFIG_FIREWIRE_OHCI=m ++CONFIG_FIREWIRE_SBP2=m ++CONFIG_FIREWIRE_NET=m ++CONFIG_FIREWIRE_NOSY=m ++CONFIG_BONDING=m ++CONFIG_DUMMY=m ++CONFIG_WIREGUARD=m ++CONFIG_EQUALIZER=m ++CONFIG_NET_FC=y ++CONFIG_IFB=m ++CONFIG_NET_TEAM=m ++CONFIG_NET_TEAM_MODE_BROADCAST=m ++CONFIG_NET_TEAM_MODE_ROUNDROBIN=m ++CONFIG_NET_TEAM_MODE_RANDOM=m ++CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m ++CONFIG_NET_TEAM_MODE_LOADBALANCE=m ++CONFIG_MACVLAN=m ++CONFIG_MACVTAP=m ++CONFIG_IPVLAN=m ++CONFIG_IPVTAP=m ++CONFIG_VXLAN=m ++CONFIG_GENEVE=m ++CONFIG_BAREUDP=m ++CONFIG_GTP=m ++CONFIG_AMT=m ++CONFIG_MACSEC=m ++CONFIG_NETCONSOLE=m ++CONFIG_NETCONSOLE_DYNAMIC=y ++CONFIG_NTB_NETDEV=m ++CONFIG_RIONET=m ++CONFIG_TUN=y ++CONFIG_VETH=m ++CONFIG_VIRTIO_NET=y ++CONFIG_NLMON=m ++CONFIG_NET_VRF=m ++CONFIG_VSOCKMON=m ++CONFIG_MHI_NET=m ++CONFIG_ARCNET=m ++CONFIG_ARCNET_1201=m ++CONFIG_ARCNET_1051=m ++CONFIG_ARCNET_RAW=m ++CONFIG_ARCNET_CAP=m ++CONFIG_ARCNET_COM90xx=m ++CONFIG_ARCNET_COM90xxIO=m ++CONFIG_ARCNET_RIM_I=m ++CONFIG_ARCNET_COM20020=m ++CONFIG_ARCNET_COM20020_PCI=m ++CONFIG_ATM_DUMMY=m ++CONFIG_ATM_TCP=m ++CONFIG_ATM_LANAI=m ++CONFIG_ATM_ENI=m ++CONFIG_ATM_NICSTAR=m ++CONFIG_ATM_IDT77252=m ++CONFIG_ATM_IA=m ++CONFIG_ATM_FORE200E=m ++CONFIG_ATM_HE=m ++CONFIG_ATM_HE_USE_SUNI=y ++CONFIG_ATM_SOLOS=m ++CONFIG_CAIF_DRIVERS=y ++CONFIG_CAIF_TTY=m ++CONFIG_CAIF_VIRTIO=m ++CONFIG_B53_SPI_DRIVER=m ++CONFIG_B53_MDIO_DRIVER=m ++CONFIG_B53_MMAP_DRIVER=m ++CONFIG_B53_SRAB_DRIVER=m ++CONFIG_B53_SERDES=m ++CONFIG_NET_DSA_BCM_SF2=m ++CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m ++CONFIG_NET_DSA_LANTIQ_GSWIP=m ++CONFIG_NET_DSA_MT7530=m ++CONFIG_NET_DSA_MV88E6060=m ++CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m ++CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C=m ++CONFIG_NET_DSA_MICROCHIP_KSZ_SPI=m ++CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI=m ++CONFIG_NET_DSA_MV88E6XXX=m ++CONFIG_NET_DSA_MV88E6XXX_PTP=y ++CONFIG_NET_DSA_MSCC_SEVILLE=m ++CONFIG_NET_DSA_AR9331=m ++CONFIG_NET_DSA_QCA8K=m 
++CONFIG_NET_DSA_SJA1105=m ++CONFIG_NET_DSA_SJA1105_PTP=y ++CONFIG_NET_DSA_SJA1105_TAS=y ++CONFIG_NET_DSA_SJA1105_VL=y ++CONFIG_NET_DSA_XRS700X_I2C=m ++CONFIG_NET_DSA_XRS700X_MDIO=m ++CONFIG_NET_DSA_REALTEK=m ++CONFIG_NET_DSA_REALTEK_RTL8365MB=m ++CONFIG_NET_DSA_REALTEK_RTL8366RB=m ++CONFIG_NET_DSA_SMSC_LAN9303_I2C=m ++CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m ++CONFIG_NET_DSA_VITESSE_VSC73XX_SPI=m ++CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM=m ++CONFIG_VORTEX=m ++CONFIG_TYPHOON=m ++CONFIG_ADAPTEC_STARFIRE=m ++CONFIG_ET131X=m ++CONFIG_SLICOSS=m ++CONFIG_ACENIC=m ++CONFIG_ALTERA_TSE=m ++CONFIG_ENA_ETHERNET=m ++CONFIG_AMD8111_ETH=m ++CONFIG_PCNET32=m ++CONFIG_AQTION=m ++CONFIG_SPI_AX88796C=m ++CONFIG_ATL2=m ++CONFIG_ATL1=m ++CONFIG_ATL1E=m ++CONFIG_ATL1C=m ++CONFIG_ALX=m ++CONFIG_B44=m ++CONFIG_BCMGENET=m ++CONFIG_TIGON3=m ++CONFIG_BNX2X=m ++CONFIG_SYSTEMPORT=m ++CONFIG_BNXT=m ++CONFIG_BNXT_DCB=y ++CONFIG_MACB=m ++CONFIG_MACB_PCI=m ++CONFIG_THUNDER_NIC_PF=m ++CONFIG_THUNDER_NIC_VF=m ++CONFIG_LIQUIDIO=m ++CONFIG_LIQUIDIO_VF=m ++CONFIG_CHELSIO_T1=m ++CONFIG_CHELSIO_T1_1G=y ++CONFIG_CHELSIO_T4_DCB=y ++CONFIG_CHELSIO_T4_FCOE=y ++CONFIG_CHELSIO_T4VF=m ++CONFIG_CHELSIO_IPSEC_INLINE=m ++CONFIG_CHELSIO_TLS_DEVICE=m ++CONFIG_ENIC=m ++CONFIG_GEMINI_ETHERNET=m ++CONFIG_DM9051=m ++CONFIG_DNET=m ++CONFIG_NET_TULIP=y ++CONFIG_DE2104X=m ++CONFIG_TULIP=m ++CONFIG_WINBOND_840=m ++CONFIG_DM9102=m ++CONFIG_ULI526X=m ++CONFIG_DL2K=m ++CONFIG_SUNDANCE=m ++CONFIG_TSNEP=m ++CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=m ++CONFIG_FUN_ETH=m ++CONFIG_E100=m ++CONFIG_E1000=m ++CONFIG_E1000E=m ++CONFIG_IGB=m ++CONFIG_IGBVF=m ++CONFIG_IXGBE=m ++CONFIG_IXGBE_DCB=y ++CONFIG_IXGBEVF=m ++CONFIG_I40E=m ++CONFIG_I40E_DCB=y ++CONFIG_I40EVF=m ++CONFIG_ICE=m ++CONFIG_FM10K=m ++CONFIG_IGC=m ++CONFIG_JME=m ++CONFIG_ADIN1110=m ++CONFIG_LITEX_LITEETH=m ++CONFIG_MVMDIO=m ++CONFIG_SKGE=m ++CONFIG_SKGE_GENESIS=y ++CONFIG_SKY2=m ++CONFIG_OCTEON_EP=m ++CONFIG_PRESTERA=m ++CONFIG_MLX4_EN=m ++CONFIG_MLX5_CORE=m ++CONFIG_MLX5_FPGA=y ++CONFIG_MLX5_CORE_EN=y ++CONFIG_MLX5_CORE_IPOIB=y ++CONFIG_MLX5_EN_IPSEC=y ++CONFIG_MLX5_EN_TLS=y ++CONFIG_MLX5_SF=y ++CONFIG_MLXSW_CORE=m ++CONFIG_KS8842=m ++CONFIG_KS8851=m ++CONFIG_KS8851_MLL=m ++CONFIG_KSZ884X_PCI=m ++CONFIG_ENC28J60=m ++CONFIG_ENCX24J600=m ++CONFIG_LAN743X=m ++CONFIG_LAN966X_SWITCH=m ++CONFIG_MSCC_OCELOT_SWITCH=m ++CONFIG_MYRI10GE=m ++CONFIG_FEALNX=m ++CONFIG_NI_XGE_MANAGEMENT_ENET=m ++CONFIG_NATSEMI=m ++CONFIG_NS83820=m ++CONFIG_S2IO=m ++CONFIG_NFP=m ++CONFIG_NE2K_PCI=m ++CONFIG_FORCEDETH=m ++CONFIG_ETHOC=m ++CONFIG_HAMACHI=m ++CONFIG_YELLOWFIN=m ++CONFIG_IONIC=m ++CONFIG_QLA3XXX=m ++CONFIG_QLCNIC=m ++CONFIG_NETXEN_NIC=m ++CONFIG_QED=m ++CONFIG_QEDE=m ++CONFIG_BNA=m ++CONFIG_QCA7000_SPI=m ++CONFIG_QCA7000_UART=m ++CONFIG_QCOM_EMAC=m ++CONFIG_RMNET=m ++CONFIG_R6040=m ++CONFIG_8139CP=m ++CONFIG_8139TOO=m ++CONFIG_8139TOO_8129=y ++CONFIG_R8169=m ++CONFIG_ROCKER=m ++CONFIG_SXGBE_ETH=m ++CONFIG_SC92031=m ++CONFIG_SIS900=m ++CONFIG_SIS190=m ++CONFIG_SFC=m ++CONFIG_SFC_FALCON=m ++CONFIG_SFC_SIENA=m ++CONFIG_SFC_SIENA_SRIOV=y ++CONFIG_EPIC100=m ++CONFIG_SMSC911X=m ++CONFIG_SMSC9420=m ++CONFIG_STMMAC_ETH=y ++CONFIG_DWMAC_GENERIC=m ++CONFIG_DWMAC_ULTRARISC=m ++CONFIG_HAPPYMEAL=m ++CONFIG_SUNGEM=m ++CONFIG_CASSINI=m ++CONFIG_NIU=m ++CONFIG_DWC_XLGMAC=m ++CONFIG_DWC_XLGMAC_PCI=m ++CONFIG_TEHUTI=m ++CONFIG_TLAN=m ++CONFIG_MSE102X=m ++CONFIG_VIA_RHINE=m ++CONFIG_VIA_RHINE_MMIO=y ++CONFIG_VIA_VELOCITY=m ++CONFIG_NGBE=m ++CONFIG_TXGBE=m ++CONFIG_WIZNET_W5100=m ++CONFIG_WIZNET_W5300=m ++CONFIG_WIZNET_W5100_SPI=m 
++CONFIG_XILINX_EMACLITE=m ++CONFIG_XILINX_AXI_EMAC=m ++CONFIG_XILINX_LL_TEMAC=m ++CONFIG_FDDI=y ++CONFIG_DEFXX=m ++CONFIG_SKFP=m ++CONFIG_LED_TRIGGER_PHY=y ++CONFIG_AMD_PHY=m ++CONFIG_ADIN_PHY=m ++CONFIG_ADIN1100_PHY=m ++CONFIG_AQUANTIA_PHY=m ++CONFIG_BROADCOM_PHY=m ++CONFIG_BCM54140_PHY=m ++CONFIG_BCM84881_PHY=m ++CONFIG_BCM87XX_PHY=m ++CONFIG_CICADA_PHY=m ++CONFIG_CORTINA_PHY=m ++CONFIG_DAVICOM_PHY=m ++CONFIG_ICPLUS_PHY=m ++CONFIG_LXT_PHY=m ++CONFIG_INTEL_XWAY_PHY=m ++CONFIG_LSI_ET1011C_PHY=m ++CONFIG_MARVELL_PHY=m ++CONFIG_MARVELL_88X2222_PHY=m ++CONFIG_MAXLINEAR_GPHY=m ++CONFIG_MICROCHIP_T1_PHY=m ++CONFIG_MICROSEMI_PHY=m ++CONFIG_MOTORCOMM_PHY=m ++CONFIG_NATIONAL_PHY=m ++CONFIG_NXP_C45_TJA11XX_PHY=m ++CONFIG_NXP_TJA11XX_PHY=m ++CONFIG_AT803X_PHY=m ++CONFIG_QSEMI_PHY=m ++CONFIG_RENESAS_PHY=m ++CONFIG_ROCKCHIP_PHY=m ++CONFIG_STE10XP=m ++CONFIG_TERANETICS_PHY=m ++CONFIG_DP83822_PHY=m ++CONFIG_DP83TC811_PHY=m ++CONFIG_DP83848_PHY=m ++CONFIG_DP83867_PHY=m ++CONFIG_DP83869_PHY=m ++CONFIG_DP83TD510_PHY=m ++CONFIG_XILINX_GMII2RGMII=m ++CONFIG_MICREL_KS8995MA=m ++CONFIG_PSE_CONTROLLER=y ++CONFIG_PSE_REGULATOR=m ++CONFIG_CAN_VCAN=m ++CONFIG_CAN_VXCAN=m ++CONFIG_CAN_CAN327=m ++CONFIG_CAN_FLEXCAN=m ++CONFIG_CAN_GRCAN=m ++CONFIG_CAN_JANZ_ICAN3=m ++CONFIG_CAN_KVASER_PCIEFD=m ++CONFIG_CAN_SLCAN=m ++CONFIG_CAN_C_CAN=m ++CONFIG_CAN_C_CAN_PLATFORM=m ++CONFIG_CAN_C_CAN_PCI=m ++CONFIG_CAN_CC770=m ++CONFIG_CAN_CC770_ISA=m ++CONFIG_CAN_CC770_PLATFORM=m ++CONFIG_CAN_CTUCANFD_PCI=m ++CONFIG_CAN_CTUCANFD_PLATFORM=m ++CONFIG_CAN_IFI_CANFD=m ++CONFIG_CAN_M_CAN=m ++CONFIG_CAN_M_CAN_PCI=m ++CONFIG_CAN_M_CAN_PLATFORM=m ++CONFIG_CAN_M_CAN_TCAN4X5X=m ++CONFIG_CAN_PEAK_PCIEFD=m ++CONFIG_CAN_SJA1000=m ++CONFIG_CAN_EMS_PCI=m ++CONFIG_CAN_F81601=m ++CONFIG_CAN_KVASER_PCI=m ++CONFIG_CAN_PEAK_PCI=m ++CONFIG_CAN_PLX_PCI=m ++CONFIG_CAN_SJA1000_ISA=m ++CONFIG_CAN_SJA1000_PLATFORM=m ++CONFIG_CAN_SOFTING=m ++CONFIG_CAN_HI311X=m ++CONFIG_CAN_MCP251X=m ++CONFIG_CAN_MCP251XFD=m ++CONFIG_CAN_8DEV_USB=m ++CONFIG_CAN_EMS_USB=m ++CONFIG_CAN_ESD_USB=m ++CONFIG_CAN_ETAS_ES58X=m ++CONFIG_CAN_GS_USB=m ++CONFIG_CAN_KVASER_USB=m ++CONFIG_CAN_MCBA_USB=m ++CONFIG_CAN_PEAK_USB=m ++CONFIG_CAN_UCAN=m ++CONFIG_MCTP_SERIAL=m ++CONFIG_MCTP_TRANSPORT_I2C=m ++CONFIG_MDIO_GPIO=m ++CONFIG_MDIO_HISI_FEMAC=m ++CONFIG_MDIO_MVUSB=m ++CONFIG_MDIO_OCTEON=m ++CONFIG_MDIO_IPQ4019=m ++CONFIG_MDIO_IPQ8064=m ++CONFIG_MDIO_BUS_MUX_GPIO=m ++CONFIG_MDIO_BUS_MUX_MULTIPLEXER=m ++CONFIG_MDIO_BUS_MUX_MMIOREG=m ++CONFIG_PLIP=m ++CONFIG_PPP=y ++CONFIG_PPP_BSDCOMP=m ++CONFIG_PPP_DEFLATE=m ++CONFIG_PPP_FILTER=y ++CONFIG_PPP_MPPE=m ++CONFIG_PPP_MULTILINK=y ++CONFIG_PPPOATM=m ++CONFIG_PPPOE=m ++CONFIG_PPTP=m ++CONFIG_PPPOL2TP=m ++CONFIG_PPP_ASYNC=m ++CONFIG_PPP_SYNC_TTY=m ++CONFIG_SLIP=m ++CONFIG_SLIP_COMPRESSED=y ++CONFIG_SLIP_SMART=y ++CONFIG_SLIP_MODE_SLIP6=y ++CONFIG_USB_NET_DRIVERS=m ++CONFIG_USB_CATC=m ++CONFIG_USB_KAWETH=m ++CONFIG_USB_PEGASUS=m ++CONFIG_USB_RTL8150=m ++CONFIG_USB_RTL8152=m ++CONFIG_USB_LAN78XX=m ++CONFIG_USB_USBNET=m ++CONFIG_USB_NET_CDC_EEM=m ++CONFIG_USB_NET_HUAWEI_CDC_NCM=m ++CONFIG_USB_NET_CDC_MBIM=m ++CONFIG_USB_NET_DM9601=m ++CONFIG_USB_NET_SR9700=m ++CONFIG_USB_NET_SR9800=m ++CONFIG_USB_NET_SMSC75XX=m ++CONFIG_USB_NET_SMSC95XX=m ++CONFIG_USB_NET_GL620A=m ++CONFIG_USB_NET_PLUSB=m ++CONFIG_USB_NET_MCS7830=m ++CONFIG_USB_NET_RNDIS_HOST=m ++CONFIG_USB_ALI_M5632=y ++CONFIG_USB_AN2720=y ++CONFIG_USB_EPSON2888=y ++CONFIG_USB_KC2190=y ++CONFIG_USB_NET_CX82310_ETH=m ++CONFIG_USB_NET_KALMIA=m ++CONFIG_USB_NET_QMI_WWAN=m ++CONFIG_USB_HSO=m 
++CONFIG_USB_NET_INT51X1=m ++CONFIG_USB_CDC_PHONET=m ++CONFIG_USB_IPHETH=m ++CONFIG_USB_SIERRA_NET=m ++CONFIG_USB_VL600=m ++CONFIG_USB_NET_CH9200=m ++CONFIG_USB_NET_AQC111=m ++CONFIG_ADM8211=m ++CONFIG_ATH5K=m ++CONFIG_ATH9K=m ++CONFIG_ATH9K_AHB=y ++CONFIG_ATH9K_DEBUGFS=y ++CONFIG_ATH9K_STATION_STATISTICS=y ++CONFIG_ATH9K_WOW=y ++CONFIG_ATH9K_CHANNEL_CONTEXT=y ++CONFIG_ATH9K_PCI_NO_EEPROM=m ++CONFIG_ATH9K_HTC=m ++CONFIG_ATH9K_HTC_DEBUGFS=y ++CONFIG_ATH9K_HWRNG=y ++CONFIG_ATH9K_COMMON_SPECTRAL=y ++CONFIG_CARL9170=m ++CONFIG_CARL9170_HWRNG=y ++CONFIG_ATH6KL=m ++CONFIG_ATH6KL_SDIO=m ++CONFIG_ATH6KL_USB=m ++CONFIG_AR5523=m ++CONFIG_WIL6210=m ++CONFIG_WIL6210_TRACING=y ++CONFIG_ATH10K=m ++CONFIG_ATH10K_PCI=m ++CONFIG_ATH10K_AHB=y ++CONFIG_ATH10K_SDIO=m ++CONFIG_ATH10K_USB=m ++CONFIG_ATH10K_DEBUGFS=y ++CONFIG_ATH10K_SPECTRAL=y ++CONFIG_ATH10K_TRACING=y ++CONFIG_WCN36XX=m ++CONFIG_ATH11K=m ++CONFIG_ATH11K_PCI=m ++CONFIG_ATH11K_DEBUGFS=y ++CONFIG_ATH11K_TRACING=y ++CONFIG_ATH11K_SPECTRAL=y ++CONFIG_AT76C50X_USB=m ++CONFIG_B43=m ++CONFIG_B43LEGACY=m ++# CONFIG_B43LEGACY_DEBUG is not set ++CONFIG_BRCMSMAC=m ++CONFIG_BRCMFMAC=m ++CONFIG_BRCMFMAC_USB=y ++CONFIG_BRCMFMAC_PCIE=y ++CONFIG_BRCM_TRACING=y ++CONFIG_IPW2100=m ++CONFIG_IPW2100_MONITOR=y ++CONFIG_IPW2200=m ++CONFIG_IPW2200_MONITOR=y ++CONFIG_IPW2200_PROMISCUOUS=y ++CONFIG_IPW2200_QOS=y ++CONFIG_IWL4965=m ++CONFIG_IWL3945=m ++CONFIG_IWLEGACY_DEBUGFS=y ++CONFIG_IWLWIFI=m ++CONFIG_IWLDVM=m ++CONFIG_IWLMVM=m ++CONFIG_IWLWIFI_DEBUGFS=y ++CONFIG_P54_COMMON=m ++CONFIG_P54_USB=m ++CONFIG_P54_PCI=m ++CONFIG_P54_SPI=m ++CONFIG_LIBERTAS=m ++CONFIG_LIBERTAS_USB=m ++CONFIG_LIBERTAS_SDIO=m ++CONFIG_LIBERTAS_SPI=m ++CONFIG_LIBERTAS_MESH=y ++CONFIG_LIBERTAS_THINFIRM=m ++CONFIG_LIBERTAS_THINFIRM_USB=m ++CONFIG_MWIFIEX=m ++CONFIG_MWIFIEX_SDIO=m ++CONFIG_MWIFIEX_PCIE=m ++CONFIG_MWIFIEX_USB=m ++CONFIG_MWL8K=m ++CONFIG_MT7601U=m ++CONFIG_MT76x0U=m ++CONFIG_MT76x0E=m ++CONFIG_MT76x2E=m ++CONFIG_MT76x2U=m ++CONFIG_MT7603E=m ++CONFIG_MT7615E=m ++CONFIG_MT7663U=m ++CONFIG_MT7663S=m ++CONFIG_MT7915E=m ++CONFIG_MT7921E=m ++CONFIG_MT7921S=m ++CONFIG_MT7921U=m ++CONFIG_WILC1000_SDIO=m ++CONFIG_WILC1000_SPI=m ++CONFIG_WILC1000_HW_OOB_INTR=y ++CONFIG_PLFXLC=m ++CONFIG_RT2X00=m ++CONFIG_RT2400PCI=m ++CONFIG_RT2500PCI=m ++CONFIG_RT61PCI=m ++CONFIG_RT2800PCI=m ++CONFIG_RT2500USB=m ++CONFIG_RT73USB=m ++CONFIG_RT2800USB=m ++CONFIG_RT2800USB_RT3573=y ++CONFIG_RT2800USB_RT53XX=y ++CONFIG_RT2800USB_RT55XX=y ++CONFIG_RT2800USB_UNKNOWN=y ++CONFIG_RTL8180=m ++CONFIG_RTL8187=m ++CONFIG_RTL8192CE=m ++CONFIG_RTL8192SE=m ++CONFIG_RTL8192DE=m ++CONFIG_RTL8723AE=m ++CONFIG_RTL8723BE=m ++CONFIG_RTL8188EE=m ++CONFIG_RTL8192EE=m ++CONFIG_RTL8821AE=m ++CONFIG_RTL8192CU=m ++# CONFIG_RTLWIFI_DEBUG is not set ++CONFIG_RTL8XXXU=m ++CONFIG_RTL8XXXU_UNTESTED=y ++CONFIG_RTW88=m ++CONFIG_RTW88_8822BE=m ++CONFIG_RTW88_8822CE=m ++CONFIG_RTW88_8723DE=m ++CONFIG_RTW88_8821CE=m ++CONFIG_RTW88_DEBUG=y ++CONFIG_RTW88_DEBUGFS=y ++CONFIG_RTW89=m ++CONFIG_RTW89_8852AE=m ++CONFIG_RTW89_8852CE=m ++CONFIG_RTW89_DEBUGMSG=y ++CONFIG_RTW89_DEBUGFS=y ++CONFIG_RSI_91X=m ++# CONFIG_RSI_DEBUGFS is not set ++CONFIG_WFX=m ++CONFIG_CW1200=m ++CONFIG_CW1200_WLAN_SDIO=m ++CONFIG_CW1200_WLAN_SPI=m ++CONFIG_WL1251=m ++CONFIG_WL1251_SPI=m ++CONFIG_WL1251_SDIO=m ++CONFIG_WL12XX=m ++CONFIG_WL18XX=m ++CONFIG_WLCORE_SPI=m ++CONFIG_WLCORE_SDIO=m ++CONFIG_ZD1211RW=m ++CONFIG_QTNFMAC_PCIE=m ++CONFIG_MAC80211_HWSIM=m ++CONFIG_VIRT_WIFI=m ++CONFIG_WAN=y ++CONFIG_HDLC=m ++CONFIG_HDLC_RAW=m ++CONFIG_HDLC_RAW_ETH=m ++CONFIG_HDLC_CISCO=m 
++CONFIG_HDLC_FR=m ++CONFIG_HDLC_PPP=m ++CONFIG_HDLC_X25=m ++CONFIG_PCI200SYN=m ++CONFIG_WANXL=m ++CONFIG_PC300TOO=m ++CONFIG_FARSYNC=m ++CONFIG_LAPBETHER=m ++CONFIG_IEEE802154_FAKELB=m ++CONFIG_IEEE802154_AT86RF230=m ++CONFIG_IEEE802154_MRF24J40=m ++CONFIG_IEEE802154_CC2520=m ++CONFIG_IEEE802154_ATUSB=m ++CONFIG_IEEE802154_ADF7242=m ++CONFIG_IEEE802154_CA8210=m ++CONFIG_IEEE802154_CA8210_DEBUGFS=y ++CONFIG_IEEE802154_MCR20A=m ++CONFIG_IEEE802154_HWSIM=m ++CONFIG_WWAN=m ++CONFIG_WWAN_HWSIM=m ++CONFIG_MHI_WWAN_CTRL=m ++CONFIG_MHI_WWAN_MBIM=m ++CONFIG_RPMSG_WWAN_CTRL=m ++CONFIG_IOSM=m ++CONFIG_MTK_T7XX=m ++CONFIG_VMXNET3=m ++CONFIG_USB4_NET=m ++CONFIG_NETDEVSIM=m ++CONFIG_ISDN=y ++CONFIG_MISDN=m ++CONFIG_MISDN_DSP=m ++CONFIG_MISDN_L1OIP=m ++CONFIG_MISDN_HFCPCI=m ++CONFIG_MISDN_HFCMULTI=m ++CONFIG_MISDN_HFCUSB=m ++CONFIG_MISDN_AVMFRITZ=m ++CONFIG_MISDN_SPEEDFAX=m ++CONFIG_MISDN_INFINEON=m ++CONFIG_MISDN_W6692=m ++CONFIG_MISDN_NETJET=m ++CONFIG_INPUT_LEDS=m ++CONFIG_INPUT_SPARSEKMAP=m ++CONFIG_INPUT_MOUSEDEV=y ++CONFIG_INPUT_MOUSEDEV_PSAUX=y ++CONFIG_INPUT_JOYDEV=m ++CONFIG_INPUT_EVDEV=y ++CONFIG_INPUT_EVBUG=m ++CONFIG_KEYBOARD_ADC=m ++CONFIG_KEYBOARD_ADP5520=m ++CONFIG_KEYBOARD_ADP5588=m ++CONFIG_KEYBOARD_ADP5589=m ++CONFIG_KEYBOARD_QT1050=m ++CONFIG_KEYBOARD_QT1070=m ++CONFIG_KEYBOARD_QT2160=m ++CONFIG_KEYBOARD_DLINK_DIR685=m ++CONFIG_KEYBOARD_LKKBD=m ++CONFIG_KEYBOARD_GPIO=m ++CONFIG_KEYBOARD_GPIO_POLLED=m ++CONFIG_KEYBOARD_TCA6416=m ++CONFIG_KEYBOARD_TCA8418=m ++CONFIG_KEYBOARD_MATRIX=m ++CONFIG_KEYBOARD_LM8323=m ++CONFIG_KEYBOARD_LM8333=m ++CONFIG_KEYBOARD_MAX7359=m ++CONFIG_KEYBOARD_MCS=m ++CONFIG_KEYBOARD_MPR121=m ++CONFIG_KEYBOARD_NEWTON=m ++CONFIG_KEYBOARD_OPENCORES=m ++CONFIG_KEYBOARD_PINEPHONE=m ++CONFIG_KEYBOARD_SAMSUNG=m ++CONFIG_KEYBOARD_GOLDFISH_EVENTS=m ++CONFIG_KEYBOARD_STOWAWAY=m ++CONFIG_KEYBOARD_SUNKBD=m ++CONFIG_KEYBOARD_STMPE=m ++CONFIG_KEYBOARD_IQS62X=m ++CONFIG_KEYBOARD_OMAP4=m ++CONFIG_KEYBOARD_TC3589X=m ++CONFIG_KEYBOARD_TM2_TOUCHKEY=m ++CONFIG_KEYBOARD_TWL4030=m ++CONFIG_KEYBOARD_XTKBD=m ++CONFIG_KEYBOARD_CAP11XX=m ++CONFIG_KEYBOARD_BCM=m ++CONFIG_KEYBOARD_MTK_PMIC=m ++CONFIG_KEYBOARD_CYPRESS_SF=m ++CONFIG_MOUSE_PS2=m ++CONFIG_MOUSE_PS2_ELANTECH=y ++CONFIG_MOUSE_PS2_SENTELIC=y ++CONFIG_MOUSE_PS2_TOUCHKIT=y ++CONFIG_MOUSE_SERIAL=m ++CONFIG_MOUSE_APPLETOUCH=m ++CONFIG_MOUSE_BCM5974=m ++CONFIG_MOUSE_CYAPA=m ++CONFIG_MOUSE_ELAN_I2C=m ++CONFIG_MOUSE_ELAN_I2C_SMBUS=y ++CONFIG_MOUSE_VSXXXAA=m ++CONFIG_MOUSE_GPIO=m ++CONFIG_MOUSE_SYNAPTICS_I2C=m ++CONFIG_MOUSE_SYNAPTICS_USB=m ++CONFIG_INPUT_JOYSTICK=y ++CONFIG_JOYSTICK_ANALOG=m ++CONFIG_JOYSTICK_A3D=m ++CONFIG_JOYSTICK_ADC=m ++CONFIG_JOYSTICK_ADI=m ++CONFIG_JOYSTICK_COBRA=m ++CONFIG_JOYSTICK_GF2K=m ++CONFIG_JOYSTICK_GRIP=m ++CONFIG_JOYSTICK_GRIP_MP=m ++CONFIG_JOYSTICK_GUILLEMOT=m ++CONFIG_JOYSTICK_INTERACT=m ++CONFIG_JOYSTICK_SIDEWINDER=m ++CONFIG_JOYSTICK_TMDC=m ++CONFIG_JOYSTICK_IFORCE=m ++CONFIG_JOYSTICK_IFORCE_USB=m ++CONFIG_JOYSTICK_IFORCE_232=m ++CONFIG_JOYSTICK_WARRIOR=m ++CONFIG_JOYSTICK_MAGELLAN=m ++CONFIG_JOYSTICK_SPACEORB=m ++CONFIG_JOYSTICK_SPACEBALL=m ++CONFIG_JOYSTICK_STINGER=m ++CONFIG_JOYSTICK_TWIDJOY=m ++CONFIG_JOYSTICK_ZHENHUA=m ++CONFIG_JOYSTICK_DB9=m ++CONFIG_JOYSTICK_GAMECON=m ++CONFIG_JOYSTICK_TURBOGRAFX=m ++CONFIG_JOYSTICK_AS5011=m ++CONFIG_JOYSTICK_JOYDUMP=m ++CONFIG_JOYSTICK_XPAD=m ++CONFIG_JOYSTICK_XPAD_FF=y ++CONFIG_JOYSTICK_XPAD_LEDS=y ++CONFIG_JOYSTICK_WALKERA0701=m ++CONFIG_JOYSTICK_PSXPAD_SPI=m ++CONFIG_JOYSTICK_PSXPAD_SPI_FF=y ++CONFIG_JOYSTICK_PXRC=m ++CONFIG_JOYSTICK_QWIIC=m 
++CONFIG_JOYSTICK_FSIA6B=m ++CONFIG_JOYSTICK_SENSEHAT=m ++CONFIG_INPUT_TABLET=y ++CONFIG_TABLET_USB_ACECAD=m ++CONFIG_TABLET_USB_AIPTEK=m ++CONFIG_TABLET_USB_HANWANG=m ++CONFIG_TABLET_USB_KBTAB=m ++CONFIG_TABLET_USB_PEGASUS=m ++CONFIG_TABLET_SERIAL_WACOM4=m ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_88PM860X=m ++CONFIG_TOUCHSCREEN_ADS7846=m ++CONFIG_TOUCHSCREEN_AD7877=m ++CONFIG_TOUCHSCREEN_AD7879=m ++CONFIG_TOUCHSCREEN_AD7879_I2C=m ++CONFIG_TOUCHSCREEN_AD7879_SPI=m ++CONFIG_TOUCHSCREEN_ADC=m ++CONFIG_TOUCHSCREEN_AR1021_I2C=m ++CONFIG_TOUCHSCREEN_ATMEL_MXT=m ++CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y ++CONFIG_TOUCHSCREEN_AUO_PIXCIR=m ++CONFIG_TOUCHSCREEN_BU21013=m ++CONFIG_TOUCHSCREEN_BU21029=m ++CONFIG_TOUCHSCREEN_CHIPONE_ICN8318=m ++CONFIG_TOUCHSCREEN_CY8CTMA140=m ++CONFIG_TOUCHSCREEN_CY8CTMG110=m ++CONFIG_TOUCHSCREEN_CYTTSP_CORE=m ++CONFIG_TOUCHSCREEN_CYTTSP_I2C=m ++CONFIG_TOUCHSCREEN_CYTTSP_SPI=m ++CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m ++CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m ++CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m ++CONFIG_TOUCHSCREEN_DA9034=m ++CONFIG_TOUCHSCREEN_DA9052=m ++CONFIG_TOUCHSCREEN_DYNAPRO=m ++CONFIG_TOUCHSCREEN_HAMPSHIRE=m ++CONFIG_TOUCHSCREEN_EETI=m ++CONFIG_TOUCHSCREEN_EGALAX=m ++CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m ++CONFIG_TOUCHSCREEN_EXC3000=m ++CONFIG_TOUCHSCREEN_FUJITSU=m ++CONFIG_TOUCHSCREEN_GOODIX=m ++CONFIG_TOUCHSCREEN_HIDEEP=m ++CONFIG_TOUCHSCREEN_HYCON_HY46XX=m ++CONFIG_TOUCHSCREEN_ILI210X=m ++CONFIG_TOUCHSCREEN_ILITEK=m ++CONFIG_TOUCHSCREEN_S6SY761=m ++CONFIG_TOUCHSCREEN_GUNZE=m ++CONFIG_TOUCHSCREEN_EKTF2127=m ++CONFIG_TOUCHSCREEN_ELAN=m ++CONFIG_TOUCHSCREEN_ELO=m ++CONFIG_TOUCHSCREEN_WACOM_W8001=m ++CONFIG_TOUCHSCREEN_WACOM_I2C=m ++CONFIG_TOUCHSCREEN_MAX11801=m ++CONFIG_TOUCHSCREEN_MCS5000=m ++CONFIG_TOUCHSCREEN_MMS114=m ++CONFIG_TOUCHSCREEN_MELFAS_MIP4=m ++CONFIG_TOUCHSCREEN_MSG2638=m ++CONFIG_TOUCHSCREEN_MTOUCH=m ++CONFIG_TOUCHSCREEN_IMAGIS=m ++CONFIG_TOUCHSCREEN_IMX6UL_TSC=m ++CONFIG_TOUCHSCREEN_INEXIO=m ++CONFIG_TOUCHSCREEN_PENMOUNT=m ++CONFIG_TOUCHSCREEN_EDT_FT5X06=m ++CONFIG_TOUCHSCREEN_TOUCHRIGHT=m ++CONFIG_TOUCHSCREEN_TOUCHWIN=m ++CONFIG_TOUCHSCREEN_PIXCIR=m ++CONFIG_TOUCHSCREEN_WDT87XX_I2C=m ++CONFIG_TOUCHSCREEN_WM831X=m ++CONFIG_TOUCHSCREEN_WM97XX=m ++CONFIG_TOUCHSCREEN_USB_COMPOSITE=m ++CONFIG_TOUCHSCREEN_MC13783=m ++CONFIG_TOUCHSCREEN_TOUCHIT213=m ++CONFIG_TOUCHSCREEN_TSC_SERIO=m ++CONFIG_TOUCHSCREEN_TSC2004=m ++CONFIG_TOUCHSCREEN_TSC2005=m ++CONFIG_TOUCHSCREEN_TSC2007=m ++CONFIG_TOUCHSCREEN_TSC2007_IIO=y ++CONFIG_TOUCHSCREEN_PCAP=m ++CONFIG_TOUCHSCREEN_RM_TS=m ++CONFIG_TOUCHSCREEN_SILEAD=m ++CONFIG_TOUCHSCREEN_SIS_I2C=m ++CONFIG_TOUCHSCREEN_ST1232=m ++CONFIG_TOUCHSCREEN_STMFTS=m ++CONFIG_TOUCHSCREEN_STMPE=m ++CONFIG_TOUCHSCREEN_SUR40=m ++CONFIG_TOUCHSCREEN_SURFACE3_SPI=m ++CONFIG_TOUCHSCREEN_SX8654=m ++CONFIG_TOUCHSCREEN_TPS6507X=m ++CONFIG_TOUCHSCREEN_ZET6223=m ++CONFIG_TOUCHSCREEN_ZFORCE=m ++CONFIG_TOUCHSCREEN_COLIBRI_VF50=m ++CONFIG_TOUCHSCREEN_ROHM_BU21023=m ++CONFIG_TOUCHSCREEN_IQS5XX=m ++CONFIG_TOUCHSCREEN_ZINITIX=m ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_88PM860X_ONKEY=m ++CONFIG_INPUT_88PM80X_ONKEY=m ++CONFIG_INPUT_AD714X=m ++CONFIG_INPUT_ARIZONA_HAPTICS=m ++CONFIG_INPUT_ATC260X_ONKEY=m ++CONFIG_INPUT_ATMEL_CAPTOUCH=m ++CONFIG_INPUT_BMA150=m ++CONFIG_INPUT_E3X0_BUTTON=m ++CONFIG_INPUT_MAX77650_ONKEY=m ++CONFIG_INPUT_MAX8925_ONKEY=m ++CONFIG_INPUT_MC13783_PWRBUTTON=m ++CONFIG_INPUT_MMA8450=m ++CONFIG_INPUT_GPIO_BEEPER=m ++CONFIG_INPUT_GPIO_DECODER=m ++CONFIG_INPUT_GPIO_VIBRA=m ++CONFIG_INPUT_CPCAP_PWRBUTTON=m ++CONFIG_INPUT_ATI_REMOTE2=m 
++CONFIG_INPUT_KEYSPAN_REMOTE=m ++CONFIG_INPUT_KXTJ9=m ++CONFIG_INPUT_POWERMATE=m ++CONFIG_INPUT_YEALINK=m ++CONFIG_INPUT_CM109=m ++CONFIG_INPUT_REGULATOR_HAPTIC=m ++CONFIG_INPUT_RETU_PWRBUTTON=m ++CONFIG_INPUT_TPS65218_PWRBUTTON=m ++CONFIG_INPUT_AXP20X_PEK=m ++CONFIG_INPUT_TWL4030_PWRBUTTON=m ++CONFIG_INPUT_TWL4030_VIBRA=m ++CONFIG_INPUT_TWL6040_VIBRA=m ++CONFIG_INPUT_UINPUT=y ++CONFIG_INPUT_PALMAS_PWRBUTTON=m ++CONFIG_INPUT_PCF50633_PMU=m ++CONFIG_INPUT_PCF8574=m ++CONFIG_INPUT_GPIO_ROTARY_ENCODER=m ++CONFIG_INPUT_DA7280_HAPTICS=m ++CONFIG_INPUT_DA9052_ONKEY=m ++CONFIG_INPUT_DA9055_ONKEY=m ++CONFIG_INPUT_DA9063_ONKEY=m ++CONFIG_INPUT_WM831X_ON=m ++CONFIG_INPUT_PCAP=m ++CONFIG_INPUT_ADXL34X=m ++CONFIG_INPUT_IBM_PANEL=m ++CONFIG_INPUT_IMS_PCU=m ++CONFIG_INPUT_IQS269A=m ++CONFIG_INPUT_IQS626A=m ++CONFIG_INPUT_IQS7222=m ++CONFIG_INPUT_CMA3000=m ++CONFIG_INPUT_CMA3000_I2C=m ++CONFIG_INPUT_DRV260X_HAPTICS=m ++CONFIG_INPUT_DRV2665_HAPTICS=m ++CONFIG_INPUT_DRV2667_HAPTICS=m ++CONFIG_INPUT_RAVE_SP_PWRBUTTON=m ++CONFIG_INPUT_RT5120_PWRKEY=m ++CONFIG_INPUT_STPMIC1_ONKEY=m ++CONFIG_RMI4_I2C=m ++CONFIG_RMI4_SPI=m ++CONFIG_RMI4_SMB=m ++CONFIG_RMI4_F34=y ++CONFIG_RMI4_F3A=y ++CONFIG_RMI4_F54=y ++CONFIG_SERIO_SERPORT=m ++CONFIG_SERIO_PARKBD=m ++CONFIG_SERIO_PCIPS2=m ++CONFIG_SERIO_RAW=m ++CONFIG_SERIO_ALTERA_PS2=m ++CONFIG_SERIO_PS2MULT=m ++CONFIG_SERIO_ARC_PS2=m ++CONFIG_SERIO_APBPS2=m ++CONFIG_SERIO_GPIO_PS2=m ++CONFIG_USERIO=m ++CONFIG_GAMEPORT_EMU10K1=m ++CONFIG_GAMEPORT_FM801=m ++CONFIG_LEGACY_PTY_COUNT=0 ++CONFIG_SERIAL_8250=y ++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set ++CONFIG_SERIAL_8250_FINTEK=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_SERIAL_8250_EXAR=m ++CONFIG_SERIAL_8250_MEN_MCB=m ++CONFIG_SERIAL_8250_NR_UARTS=48 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=32 ++CONFIG_SERIAL_8250_EXTENDED=y ++CONFIG_SERIAL_8250_MANY_PORTS=y ++CONFIG_SERIAL_8250_SHARE_IRQ=y ++CONFIG_SERIAL_8250_RSA=y ++CONFIG_SERIAL_8250_DW=y ++CONFIG_SERIAL_8250_RT288X=y ++CONFIG_SERIAL_8250_PERICOM=m ++CONFIG_SERIAL_OF_PLATFORM=y ++CONFIG_SERIAL_EARLYCON_RISCV_SBI=y ++CONFIG_SERIAL_KGDB_NMI=y ++CONFIG_SERIAL_MAX3100=m ++CONFIG_SERIAL_MAX310X=y ++CONFIG_SERIAL_UARTLITE=m ++CONFIG_SERIAL_JSM=m ++CONFIG_SERIAL_SCCNXP=y ++CONFIG_SERIAL_SCCNXP_CONSOLE=y ++CONFIG_SERIAL_SC16IS7XX=m ++CONFIG_SERIAL_SC16IS7XX_SPI=y ++CONFIG_SERIAL_ALTERA_JTAGUART=m ++CONFIG_SERIAL_ALTERA_UART=m ++CONFIG_SERIAL_XILINX_PS_UART=m ++CONFIG_SERIAL_ARC=m ++CONFIG_SERIAL_RP2=m ++CONFIG_SERIAL_FSL_LPUART=m ++CONFIG_SERIAL_FSL_LINFLEXUART=m ++CONFIG_SERIAL_CONEXANT_DIGICOLOR=m ++CONFIG_SERIAL_MEN_Z135=m ++CONFIG_SERIAL_SPRD=m ++CONFIG_SERIAL_LITEUART=m ++CONFIG_SERIAL_NONSTANDARD=y ++CONFIG_MOXA_INTELLIO=m ++CONFIG_MOXA_SMARTIO=m ++CONFIG_N_HDLC=m ++CONFIG_GOLDFISH_TTY=m ++CONFIG_N_GSM=m ++CONFIG_NOZOMI=m ++CONFIG_NULL_TTY=m ++CONFIG_HVC_RISCV_SBI=y ++CONFIG_RPMSG_TTY=m ++CONFIG_SERIAL_DEV_BUS=y ++CONFIG_TTY_PRINTK=y ++CONFIG_PRINTER=m ++CONFIG_PPDEV=m ++CONFIG_VIRTIO_CONSOLE=y ++CONFIG_IPMI_HANDLER=m ++CONFIG_IPMI_DEVICE_INTERFACE=m ++CONFIG_IPMI_SSIF=m ++CONFIG_IPMI_IPMB=m ++CONFIG_IPMI_WATCHDOG=m ++CONFIG_IPMI_POWEROFF=m ++CONFIG_IPMB_DEVICE_INTERFACE=m ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_TIMERIOMEM=m ++CONFIG_HW_RANDOM_BA431=m ++CONFIG_HW_RANDOM_VIRTIO=m ++CONFIG_HW_RANDOM_POLARFIRE_SOC=m ++CONFIG_HW_RANDOM_CCTRNG=m ++CONFIG_HW_RANDOM_XIPHERA=m ++CONFIG_APPLICOM=m ++CONFIG_TCG_TIS=y ++CONFIG_TCG_TIS_SPI=m ++CONFIG_TCG_TIS_SPI_CR50=y ++CONFIG_TCG_TIS_I2C=m ++CONFIG_TCG_TIS_I2C_CR50=m ++CONFIG_TCG_TIS_I2C_ATMEL=m ++CONFIG_TCG_TIS_I2C_INFINEON=m 
++CONFIG_TCG_TIS_I2C_NUVOTON=m ++CONFIG_TCG_ATMEL=m ++CONFIG_TCG_VTPM_PROXY=m ++CONFIG_TCG_TIS_ST33ZP24_I2C=m ++CONFIG_TCG_TIS_ST33ZP24_SPI=m ++CONFIG_XILLYBUS=m ++CONFIG_XILLYBUS_PCIE=m ++CONFIG_XILLYBUS_OF=m ++CONFIG_XILLYUSB=m ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_ARB_GPIO_CHALLENGE=m ++CONFIG_I2C_MUX_GPIO=m ++CONFIG_I2C_MUX_GPMUX=m ++CONFIG_I2C_MUX_LTC4306=m ++CONFIG_I2C_MUX_PCA9541=m ++CONFIG_I2C_MUX_PCA954x=m ++CONFIG_I2C_MUX_PINCTRL=m ++CONFIG_I2C_MUX_REG=m ++CONFIG_I2C_DEMUX_PINCTRL=m ++CONFIG_I2C_MUX_MLXCPLD=m ++CONFIG_I2C_ALI1535=m ++CONFIG_I2C_ALI1563=m ++CONFIG_I2C_ALI15X3=m ++CONFIG_I2C_AMD756=m ++CONFIG_I2C_AMD8111=m ++CONFIG_I2C_I801=m ++CONFIG_I2C_ISCH=m ++CONFIG_I2C_PIIX4=m ++CONFIG_I2C_NFORCE2=m ++CONFIG_I2C_NVIDIA_GPU=m ++CONFIG_I2C_SIS5595=m ++CONFIG_I2C_SIS630=m ++CONFIG_I2C_SIS96X=m ++CONFIG_I2C_VIA=m ++CONFIG_I2C_VIAPRO=m ++CONFIG_I2C_CBUS_GPIO=m ++CONFIG_I2C_DESIGNWARE_PLATFORM=y ++CONFIG_I2C_DESIGNWARE_PCI=m ++CONFIG_I2C_GPIO=m ++CONFIG_I2C_KEMPLD=m ++CONFIG_I2C_MICROCHIP_CORE=m ++CONFIG_I2C_OCORES=m ++CONFIG_I2C_PCA_PLATFORM=m ++CONFIG_I2C_RK3X=m ++CONFIG_I2C_SIMTEC=m ++CONFIG_I2C_XILINX=m ++CONFIG_I2C_DIOLAN_U2C=m ++CONFIG_I2C_DLN2=m ++CONFIG_I2C_CP2615=m ++CONFIG_I2C_PARPORT=m ++CONFIG_I2C_PCI1XXXX=m ++CONFIG_I2C_ROBOTFUZZ_OSIF=m ++CONFIG_I2C_TAOS_EVM=m ++CONFIG_I2C_TINY_USB=m ++CONFIG_I2C_VIPERBOARD=m ++CONFIG_I2C_FSI=m ++CONFIG_I2C_VIRTIO=m ++CONFIG_I2C_STUB=m ++CONFIG_I2C_SLAVE=y ++CONFIG_I2C_SLAVE_EEPROM=m ++CONFIG_I3C=m ++CONFIG_CDNS_I3C_MASTER=m ++CONFIG_DW_I3C_MASTER=m ++CONFIG_SVC_I3C_MASTER=m ++CONFIG_MIPI_I3C_HCI=m ++CONFIG_SPI=y ++CONFIG_SPI_ALTERA=m ++CONFIG_SPI_ALTERA_DFL=m ++CONFIG_SPI_AXI_SPI_ENGINE=m ++CONFIG_SPI_BUTTERFLY=m ++CONFIG_SPI_CADENCE=m ++CONFIG_SPI_CADENCE_XSPI=m ++CONFIG_SPI_DESIGNWARE=y ++CONFIG_SPI_DW_DMA=y ++CONFIG_SPI_DW_PCI=m ++CONFIG_SPI_DW_MMIO=y ++CONFIG_SPI_DLN2=m ++CONFIG_SPI_FSI=m ++CONFIG_SPI_GPIO=m ++CONFIG_SPI_LM70_LLP=m ++CONFIG_SPI_FSL_SPI=y ++CONFIG_SPI_MICROCHIP_CORE=m ++CONFIG_SPI_MICROCHIP_CORE_QSPI=m ++CONFIG_SPI_OC_TINY=m ++CONFIG_SPI_PXA2XX=m ++CONFIG_SPI_SC18IS602=m ++CONFIG_SPI_SIFIVE=y ++CONFIG_SPI_MXIC=m ++CONFIG_SPI_XCOMM=m ++CONFIG_SPI_XILINX=m ++CONFIG_SPI_ZYNQMP_GQSPI=m ++CONFIG_SPI_AMD=m ++CONFIG_SPI_MUX=m ++CONFIG_SPI_SPIDEV=m ++CONFIG_SPI_LOOPBACK_TEST=m ++CONFIG_SPI_TLE62X0=m ++CONFIG_SPI_SLAVE=y ++CONFIG_SPI_SLAVE_TIME=m ++CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m ++CONFIG_SPMI=m ++CONFIG_SPMI_HISI3670=m ++CONFIG_HSI=m ++CONFIG_HSI_CHAR=m ++CONFIG_PPS_CLIENT_LDISC=m ++CONFIG_PPS_CLIENT_PARPORT=m ++CONFIG_PPS_CLIENT_GPIO=m ++CONFIG_DP83640_PHY=m ++CONFIG_PTP_1588_CLOCK_INES=m ++CONFIG_PTP_1588_CLOCK_IDT82P33=m ++CONFIG_PTP_1588_CLOCK_IDTCM=m ++CONFIG_PTP_1588_CLOCK_OCP=m ++CONFIG_PINCTRL_AS3722=y ++CONFIG_PINCTRL_AXP209=m ++CONFIG_PINCTRL_CY8C95X0=m ++CONFIG_PINCTRL_DA9062=m ++CONFIG_PINCTRL_MAX77620=m ++CONFIG_PINCTRL_MCP23S08=m ++CONFIG_PINCTRL_MICROCHIP_SGPIO=y ++CONFIG_PINCTRL_OCELOT=y ++CONFIG_PINCTRL_PALMAS=y ++CONFIG_PINCTRL_SINGLE=y ++CONFIG_PINCTRL_STMFX=m ++CONFIG_PINCTRL_SX150X=y ++CONFIG_PINCTRL_LOCHNAGAR=m ++# CONFIG_PINCTRL_STARFIVE_JH7100 is not set ++CONFIG_PINCTRL_ULTRARISC_DP1000=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_GPIO_74XX_MMIO=m ++CONFIG_GPIO_ALTERA=m ++CONFIG_GPIO_CADENCE=m ++CONFIG_GPIO_DWAPB=m ++CONFIG_GPIO_EXAR=m ++CONFIG_GPIO_FTGPIO010=y ++CONFIG_GPIO_GENERIC_PLATFORM=y ++CONFIG_GPIO_GRGPIO=m ++CONFIG_GPIO_HLWD=m ++CONFIG_GPIO_LOGICVC=m ++CONFIG_GPIO_MB86S7X=m ++CONFIG_GPIO_MENZ127=m ++CONFIG_GPIO_SIFIVE=y ++CONFIG_GPIO_SIOX=m ++CONFIG_GPIO_SYSCON=m ++CONFIG_GPIO_WCD934X=m 
++CONFIG_GPIO_AMD_FCH=m ++CONFIG_GPIO_ADNP=m ++CONFIG_GPIO_GW_PLD=m ++CONFIG_GPIO_MAX7300=m ++CONFIG_GPIO_MAX732X=m ++CONFIG_GPIO_PCA953X=m ++CONFIG_GPIO_PCA953X_IRQ=y ++CONFIG_GPIO_PCA9570=m ++CONFIG_GPIO_PCF857X=m ++CONFIG_GPIO_TPIC2810=m ++CONFIG_GPIO_ADP5520=m ++CONFIG_GPIO_ARIZONA=m ++CONFIG_GPIO_BD71815=m ++CONFIG_GPIO_BD71828=m ++CONFIG_GPIO_BD9571MWV=m ++CONFIG_GPIO_DA9052=m ++CONFIG_GPIO_DA9055=m ++CONFIG_GPIO_DLN2=m ++CONFIG_GPIO_JANZ_TTL=m ++CONFIG_GPIO_KEMPLD=m ++CONFIG_GPIO_LP3943=m ++CONFIG_GPIO_LP873X=m ++CONFIG_GPIO_LP87565=m ++CONFIG_GPIO_MADERA=m ++CONFIG_GPIO_MAX77620=m ++CONFIG_GPIO_MAX77650=m ++CONFIG_GPIO_PALMAS=y ++CONFIG_GPIO_RC5T583=y ++CONFIG_GPIO_STMPE=y ++CONFIG_GPIO_TC3589X=y ++CONFIG_GPIO_TPS65086=m ++CONFIG_GPIO_TPS65218=m ++CONFIG_GPIO_TPS6586X=y ++CONFIG_GPIO_TPS65910=y ++CONFIG_GPIO_TPS65912=m ++CONFIG_GPIO_TQMX86=m ++CONFIG_GPIO_TWL4030=m ++CONFIG_GPIO_TWL6040=m ++CONFIG_GPIO_WM831X=m ++CONFIG_GPIO_WM8350=m ++CONFIG_GPIO_WM8994=m ++CONFIG_GPIO_PCI_IDIO_16=m ++CONFIG_GPIO_PCIE_IDIO_24=m ++CONFIG_GPIO_RDC321X=m ++CONFIG_GPIO_74X164=m ++CONFIG_GPIO_MAX3191X=m ++CONFIG_GPIO_MAX7301=m ++CONFIG_GPIO_MC33880=m ++CONFIG_GPIO_PISOSR=m ++CONFIG_GPIO_XRA1403=m ++CONFIG_GPIO_MOXTET=m ++CONFIG_GPIO_VIPERBOARD=m ++CONFIG_GPIO_AGGREGATOR=m ++CONFIG_GPIO_VIRTIO=m ++CONFIG_GPIO_SIM=m ++CONFIG_W1_MASTER_MATROX=m ++CONFIG_W1_MASTER_DS2490=m ++CONFIG_W1_MASTER_DS2482=m ++CONFIG_W1_MASTER_GPIO=m ++CONFIG_W1_MASTER_SGI=m ++CONFIG_W1_SLAVE_THERM=m ++CONFIG_W1_SLAVE_SMEM=m ++CONFIG_W1_SLAVE_DS2405=m ++CONFIG_W1_SLAVE_DS2408=m ++CONFIG_W1_SLAVE_DS2413=m ++CONFIG_W1_SLAVE_DS2406=m ++CONFIG_W1_SLAVE_DS2423=m ++CONFIG_W1_SLAVE_DS2805=m ++CONFIG_W1_SLAVE_DS2430=m ++CONFIG_W1_SLAVE_DS2431=m ++CONFIG_W1_SLAVE_DS2433=m ++CONFIG_W1_SLAVE_DS2438=m ++CONFIG_W1_SLAVE_DS250X=m ++CONFIG_W1_SLAVE_DS28E04=m ++CONFIG_W1_SLAVE_DS28E17=m ++CONFIG_POWER_RESET_AS3722=y ++CONFIG_POWER_RESET_ATC260X=m ++CONFIG_POWER_RESET_GPIO=y ++CONFIG_POWER_RESET_GPIO_RESTART=y ++CONFIG_POWER_RESET_LTC2952=y ++CONFIG_POWER_RESET_MT6323=y ++CONFIG_POWER_RESET_REGULATOR=y ++CONFIG_POWER_RESET_RESTART=y ++CONFIG_POWER_RESET_TPS65086=y ++CONFIG_SYSCON_REBOOT_MODE=m ++CONFIG_NVMEM_REBOOT_MODE=m ++CONFIG_GENERIC_ADC_BATTERY=m ++CONFIG_IP5XXX_POWER=m ++CONFIG_MAX8925_POWER=m ++CONFIG_WM831X_BACKUP=m ++CONFIG_WM831X_POWER=m ++CONFIG_WM8350_POWER=m ++CONFIG_TEST_POWER=m ++CONFIG_BATTERY_88PM860X=m ++CONFIG_CHARGER_ADP5061=m ++CONFIG_BATTERY_ACT8945A=m ++CONFIG_BATTERY_CW2015=m ++CONFIG_BATTERY_DS2760=m ++CONFIG_BATTERY_DS2780=m ++CONFIG_BATTERY_DS2781=m ++CONFIG_BATTERY_DS2782=m ++CONFIG_BATTERY_SAMSUNG_SDI=y ++CONFIG_BATTERY_SBS=m ++CONFIG_CHARGER_SBS=m ++CONFIG_MANAGER_SBS=m ++CONFIG_BATTERY_BQ27XXX=m ++CONFIG_BATTERY_DA9030=m ++CONFIG_BATTERY_DA9052=m ++CONFIG_CHARGER_DA9150=m ++CONFIG_BATTERY_DA9150=m ++CONFIG_CHARGER_AXP20X=m ++CONFIG_BATTERY_AXP20X=m ++CONFIG_AXP20X_POWER=m ++CONFIG_BATTERY_MAX17040=m ++CONFIG_BATTERY_MAX17042=m ++CONFIG_BATTERY_MAX1721X=m ++CONFIG_BATTERY_TWL4030_MADC=m ++CONFIG_CHARGER_88PM860X=m ++CONFIG_CHARGER_PCF50633=m ++CONFIG_BATTERY_RX51=m ++CONFIG_CHARGER_ISP1704=m ++CONFIG_CHARGER_MAX8903=m ++CONFIG_CHARGER_TWL4030=m ++CONFIG_CHARGER_LP8727=m ++CONFIG_CHARGER_LP8788=m ++CONFIG_CHARGER_GPIO=m ++CONFIG_CHARGER_MANAGER=y ++CONFIG_CHARGER_LT3651=m ++CONFIG_CHARGER_LTC4162L=m ++CONFIG_CHARGER_MAX14577=m ++CONFIG_CHARGER_DETECTOR_MAX14656=m ++CONFIG_CHARGER_MAX77650=m ++CONFIG_CHARGER_MAX77693=m ++CONFIG_CHARGER_MAX77976=m ++CONFIG_CHARGER_MAX8997=m ++CONFIG_CHARGER_MAX8998=m 
++CONFIG_CHARGER_MP2629=m ++CONFIG_CHARGER_MT6360=m ++CONFIG_CHARGER_MT6370=m ++CONFIG_CHARGER_BQ2415X=m ++CONFIG_CHARGER_BQ24190=m ++CONFIG_CHARGER_BQ24257=m ++CONFIG_CHARGER_BQ24735=m ++CONFIG_CHARGER_BQ2515X=m ++CONFIG_CHARGER_BQ25890=m ++CONFIG_CHARGER_BQ25980=m ++CONFIG_CHARGER_BQ256XX=m ++CONFIG_CHARGER_SMB347=m ++CONFIG_CHARGER_TPS65090=m ++CONFIG_CHARGER_TPS65217=m ++CONFIG_BATTERY_GAUGE_LTC2941=m ++CONFIG_BATTERY_GOLDFISH=m ++CONFIG_BATTERY_RT5033=m ++CONFIG_CHARGER_RT9455=m ++CONFIG_CHARGER_UCS1002=m ++CONFIG_CHARGER_BD99954=m ++CONFIG_RN5T618_POWER=m ++CONFIG_BATTERY_UG3105=m ++CONFIG_SENSORS_AD7314=m ++CONFIG_SENSORS_AD7414=m ++CONFIG_SENSORS_AD7418=m ++CONFIG_SENSORS_ADM1025=m ++CONFIG_SENSORS_ADM1026=m ++CONFIG_SENSORS_ADM1029=m ++CONFIG_SENSORS_ADM1031=m ++CONFIG_SENSORS_ADM1177=m ++CONFIG_SENSORS_ADM9240=m ++CONFIG_SENSORS_ADT7310=m ++CONFIG_SENSORS_ADT7410=m ++CONFIG_SENSORS_ADT7411=m ++CONFIG_SENSORS_ADT7462=m ++CONFIG_SENSORS_ADT7470=m ++CONFIG_SENSORS_ADT7475=m ++CONFIG_SENSORS_AHT10=m ++CONFIG_SENSORS_AQUACOMPUTER_D5NEXT=m ++CONFIG_SENSORS_AS370=m ++CONFIG_SENSORS_ASC7621=m ++CONFIG_SENSORS_AXI_FAN_CONTROL=m ++CONFIG_SENSORS_ATXP1=m ++CONFIG_SENSORS_CORSAIR_CPRO=m ++CONFIG_SENSORS_CORSAIR_PSU=m ++CONFIG_SENSORS_DRIVETEMP=m ++CONFIG_SENSORS_DS620=m ++CONFIG_SENSORS_DS1621=m ++CONFIG_SENSORS_DA9052_ADC=m ++CONFIG_SENSORS_DA9055=m ++CONFIG_SENSORS_I5K_AMB=m ++CONFIG_SENSORS_F71805F=m ++CONFIG_SENSORS_F71882FG=m ++CONFIG_SENSORS_F75375S=m ++CONFIG_SENSORS_GSC=m ++CONFIG_SENSORS_MC13783_ADC=m ++CONFIG_SENSORS_FTSTEUTATES=m ++CONFIG_SENSORS_GL518SM=m ++CONFIG_SENSORS_GL520SM=m ++CONFIG_SENSORS_G760A=m ++CONFIG_SENSORS_G762=m ++CONFIG_SENSORS_GPIO_FAN=m ++CONFIG_SENSORS_HIH6130=m ++CONFIG_SENSORS_IBMAEM=m ++CONFIG_SENSORS_IBMPEX=m ++CONFIG_SENSORS_IIO_HWMON=m ++CONFIG_SENSORS_IT87=m ++CONFIG_SENSORS_JC42=m ++CONFIG_SENSORS_POWR1220=m ++CONFIG_SENSORS_LINEAGE=m ++CONFIG_SENSORS_LOCHNAGAR=m ++CONFIG_SENSORS_LTC2945=m ++CONFIG_SENSORS_LTC2947_I2C=m ++CONFIG_SENSORS_LTC2947_SPI=m ++CONFIG_SENSORS_LTC2990=m ++CONFIG_SENSORS_LTC2992=m ++CONFIG_SENSORS_LTC4151=m ++CONFIG_SENSORS_LTC4215=m ++CONFIG_SENSORS_LTC4222=m ++CONFIG_SENSORS_LTC4245=m ++CONFIG_SENSORS_LTC4260=m ++CONFIG_SENSORS_LTC4261=m ++CONFIG_SENSORS_MAX1111=m ++CONFIG_SENSORS_MAX127=m ++CONFIG_SENSORS_MAX16065=m ++CONFIG_SENSORS_MAX1619=m ++CONFIG_SENSORS_MAX1668=m ++CONFIG_SENSORS_MAX197=m ++CONFIG_SENSORS_MAX31722=m ++CONFIG_SENSORS_MAX31730=m ++CONFIG_SENSORS_MAX31760=m ++CONFIG_SENSORS_MAX6620=m ++CONFIG_SENSORS_MAX6621=m ++CONFIG_SENSORS_MAX6639=m ++CONFIG_SENSORS_MAX6650=m ++CONFIG_SENSORS_MAX6697=m ++CONFIG_SENSORS_MAX31790=m ++CONFIG_SENSORS_MCP3021=m ++CONFIG_SENSORS_TC654=m ++CONFIG_SENSORS_TPS23861=m ++CONFIG_SENSORS_MENF21BMC_HWMON=m ++CONFIG_SENSORS_MR75203=m ++CONFIG_SENSORS_ADCXX=m ++CONFIG_SENSORS_LM63=m ++CONFIG_SENSORS_LM70=m ++CONFIG_SENSORS_LM73=m ++CONFIG_SENSORS_LM75=m ++CONFIG_SENSORS_LM77=m ++CONFIG_SENSORS_LM78=m ++CONFIG_SENSORS_LM80=m ++CONFIG_SENSORS_LM83=m ++CONFIG_SENSORS_LM85=m ++CONFIG_SENSORS_LM87=m ++CONFIG_SENSORS_LM90=m ++CONFIG_SENSORS_LM92=m ++CONFIG_SENSORS_LM93=m ++CONFIG_SENSORS_LM95234=m ++CONFIG_SENSORS_LM95241=m ++CONFIG_SENSORS_LM95245=m ++CONFIG_SENSORS_PC87360=m ++CONFIG_SENSORS_PC87427=m ++CONFIG_SENSORS_NTC_THERMISTOR=m ++CONFIG_SENSORS_NCT6683=m ++CONFIG_SENSORS_NCT6775=m ++CONFIG_SENSORS_NCT6775_I2C=m ++CONFIG_SENSORS_NCT7802=m ++CONFIG_SENSORS_NCT7904=m ++CONFIG_SENSORS_NPCM7XX=m ++CONFIG_SENSORS_NZXT_KRAKEN2=m ++CONFIG_SENSORS_NZXT_SMART2=m ++CONFIG_SENSORS_PCF8591=m 
++CONFIG_SENSORS_PECI_CPUTEMP=m ++CONFIG_SENSORS_PECI_DIMMTEMP=m ++CONFIG_PMBUS=m ++CONFIG_SENSORS_ADM1266=m ++CONFIG_SENSORS_ADM1275=m ++CONFIG_SENSORS_BEL_PFE=m ++CONFIG_SENSORS_BPA_RS600=m ++CONFIG_SENSORS_DELTA_AHE50DC_FAN=m ++CONFIG_SENSORS_FSP_3Y=m ++CONFIG_SENSORS_IBM_CFFPS=m ++CONFIG_SENSORS_DPS920AB=m ++CONFIG_SENSORS_INSPUR_IPSPS=m ++CONFIG_SENSORS_IR35221=m ++CONFIG_SENSORS_IR36021=m ++CONFIG_SENSORS_IR38064=m ++CONFIG_SENSORS_IR38064_REGULATOR=y ++CONFIG_SENSORS_IRPS5401=m ++CONFIG_SENSORS_ISL68137=m ++CONFIG_SENSORS_LM25066=m ++CONFIG_SENSORS_LM25066_REGULATOR=y ++CONFIG_SENSORS_LT7182S=m ++CONFIG_SENSORS_LTC2978=m ++CONFIG_SENSORS_LTC2978_REGULATOR=y ++CONFIG_SENSORS_LTC3815=m ++CONFIG_SENSORS_MAX15301=m ++CONFIG_SENSORS_MAX16064=m ++CONFIG_SENSORS_MAX16601=m ++CONFIG_SENSORS_MAX20730=m ++CONFIG_SENSORS_MAX20751=m ++CONFIG_SENSORS_MAX31785=m ++CONFIG_SENSORS_MAX34440=m ++CONFIG_SENSORS_MAX8688=m ++CONFIG_SENSORS_MP2888=m ++CONFIG_SENSORS_MP2975=m ++CONFIG_SENSORS_MP5023=m ++CONFIG_SENSORS_PIM4328=m ++CONFIG_SENSORS_PLI1209BC=m ++CONFIG_SENSORS_PLI1209BC_REGULATOR=y ++CONFIG_SENSORS_PM6764TR=m ++CONFIG_SENSORS_PXE1610=m ++CONFIG_SENSORS_Q54SJ108A2=m ++CONFIG_SENSORS_STPDDC60=m ++CONFIG_SENSORS_TPS40422=m ++CONFIG_SENSORS_TPS53679=m ++CONFIG_SENSORS_TPS546D24=m ++CONFIG_SENSORS_UCD9000=m ++CONFIG_SENSORS_UCD9200=m ++CONFIG_SENSORS_XDPE152=m ++CONFIG_SENSORS_XDPE122=m ++CONFIG_SENSORS_XDPE122_REGULATOR=y ++CONFIG_SENSORS_ZL6100=m ++CONFIG_SENSORS_SBTSI=m ++CONFIG_SENSORS_SBRMI=m ++CONFIG_SENSORS_SHT15=m ++CONFIG_SENSORS_SHT21=m ++CONFIG_SENSORS_SHT3x=m ++CONFIG_SENSORS_SHT4x=m ++CONFIG_SENSORS_SHTC1=m ++CONFIG_SENSORS_SIS5595=m ++CONFIG_SENSORS_SY7636A=m ++CONFIG_SENSORS_DME1737=m ++CONFIG_SENSORS_EMC1403=m ++CONFIG_SENSORS_EMC2103=m ++CONFIG_SENSORS_EMC2305=m ++CONFIG_SENSORS_EMC6W201=m ++CONFIG_SENSORS_SMSC47M1=m ++CONFIG_SENSORS_SMSC47M192=m ++CONFIG_SENSORS_SMSC47B397=m ++CONFIG_SENSORS_SCH5627=m ++CONFIG_SENSORS_SCH5636=m ++CONFIG_SENSORS_STTS751=m ++CONFIG_SENSORS_ADC128D818=m ++CONFIG_SENSORS_ADS7828=m ++CONFIG_SENSORS_ADS7871=m ++CONFIG_SENSORS_AMC6821=m ++CONFIG_SENSORS_INA209=m ++CONFIG_SENSORS_INA2XX=m ++CONFIG_SENSORS_INA238=m ++CONFIG_SENSORS_INA3221=m ++CONFIG_SENSORS_TC74=m ++CONFIG_SENSORS_THMC50=m ++CONFIG_SENSORS_TMP102=m ++CONFIG_SENSORS_TMP103=m ++CONFIG_SENSORS_TMP108=m ++CONFIG_SENSORS_TMP401=m ++CONFIG_SENSORS_TMP421=m ++CONFIG_SENSORS_TMP464=m ++CONFIG_SENSORS_TMP513=m ++CONFIG_SENSORS_VIA686A=m ++CONFIG_SENSORS_VT1211=m ++CONFIG_SENSORS_VT8231=m ++CONFIG_SENSORS_W83773G=m ++CONFIG_SENSORS_W83781D=m ++CONFIG_SENSORS_W83791D=m ++CONFIG_SENSORS_W83792D=m ++CONFIG_SENSORS_W83793=m ++CONFIG_SENSORS_W83795=m ++CONFIG_SENSORS_W83L785TS=m ++CONFIG_SENSORS_W83L786NG=m ++CONFIG_SENSORS_W83627HF=m ++CONFIG_SENSORS_W83627EHF=m ++CONFIG_SENSORS_WM831X=m ++CONFIG_SENSORS_WM8350=m ++CONFIG_THERMAL_NETLINK=y ++CONFIG_THERMAL_STATISTICS=y ++CONFIG_THERMAL_WRITABLE_TRIPS=y ++CONFIG_THERMAL_GOV_FAIR_SHARE=y ++CONFIG_THERMAL_GOV_BANG_BANG=y ++CONFIG_THERMAL_GOV_USER_SPACE=y ++CONFIG_CPU_THERMAL=y ++CONFIG_CPU_IDLE_THERMAL=y ++CONFIG_DEVFREQ_THERMAL=y ++CONFIG_THERMAL_EMULATION=y ++CONFIG_THERMAL_MMIO=y ++CONFIG_MAX77620_THERMAL=m ++CONFIG_DA9062_THERMAL=m ++CONFIG_GENERIC_ADC_THERMAL=m ++CONFIG_WATCHDOG=y ++CONFIG_WATCHDOG_SYSFS=y ++CONFIG_WATCHDOG_PRETIMEOUT_GOV=y ++CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=m ++CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y ++CONFIG_SOFT_WATCHDOG=m ++CONFIG_SOFT_WATCHDOG_PRETIMEOUT=y ++CONFIG_BD957XMUF_WATCHDOG=m ++CONFIG_DA9052_WATCHDOG=m 
++CONFIG_DA9055_WATCHDOG=m ++CONFIG_DA9063_WATCHDOG=m ++CONFIG_DA9062_WATCHDOG=m ++CONFIG_GPIO_WATCHDOG=m ++CONFIG_MENF21BMC_WATCHDOG=m ++CONFIG_MENZ069_WATCHDOG=m ++CONFIG_WM831X_WATCHDOG=m ++CONFIG_WM8350_WATCHDOG=m ++CONFIG_XILINX_WATCHDOG=m ++CONFIG_ZIIRAVE_WATCHDOG=m ++CONFIG_RAVE_SP_WATCHDOG=m ++CONFIG_CADENCE_WATCHDOG=m ++CONFIG_DW_WATCHDOG=m ++CONFIG_RN5T618_WATCHDOG=m ++CONFIG_TWL4030_WATCHDOG=m ++CONFIG_MAX63XX_WATCHDOG=m ++CONFIG_MAX77620_WATCHDOG=m ++CONFIG_RETU_WATCHDOG=m ++CONFIG_STPMIC1_WATCHDOG=m ++CONFIG_ALIM7101_WDT=m ++CONFIG_I6300ESB_WDT=m ++CONFIG_KEMPLD_WDT=m ++CONFIG_MEN_A21_WDT=m ++CONFIG_PCIPCWATCHDOG=m ++CONFIG_WDTPCI=m ++CONFIG_USBPCWATCHDOG=m ++CONFIG_SSB_SDIOHOST=y ++CONFIG_SSB_DRIVER_GPIO=y ++CONFIG_BCMA_HOST_SOC=y ++CONFIG_BCMA_DRIVER_GMAC_CMN=y ++CONFIG_BCMA_DRIVER_GPIO=y ++CONFIG_MFD_ACT8945A=m ++CONFIG_MFD_AS3711=y ++CONFIG_MFD_AS3722=y ++CONFIG_PMIC_ADP5520=y ++CONFIG_MFD_AAT2870_CORE=y ++CONFIG_MFD_ATMEL_FLEXCOM=m ++CONFIG_MFD_ATMEL_HLCDC=m ++CONFIG_MFD_BCM590XX=m ++CONFIG_MFD_BD9571MWV=m ++CONFIG_MFD_AXP20X_I2C=m ++CONFIG_MFD_MADERA=m ++CONFIG_MFD_MADERA_I2C=m ++CONFIG_MFD_MADERA_SPI=m ++CONFIG_MFD_CS47L15=y ++CONFIG_MFD_CS47L35=y ++CONFIG_MFD_CS47L85=y ++CONFIG_MFD_CS47L90=y ++CONFIG_MFD_CS47L92=y ++CONFIG_PMIC_DA903X=y ++CONFIG_MFD_DA9052_SPI=y ++CONFIG_MFD_DA9052_I2C=y ++CONFIG_MFD_DA9055=y ++CONFIG_MFD_DA9062=m ++CONFIG_MFD_DA9063=y ++CONFIG_MFD_DA9150=m ++CONFIG_MFD_DLN2=m ++CONFIG_MFD_GATEWORKS_GSC=m ++CONFIG_MFD_MC13XXX_SPI=m ++CONFIG_MFD_MC13XXX_I2C=m ++CONFIG_MFD_MP2629=m ++CONFIG_MFD_HI6421_PMIC=m ++CONFIG_MFD_HI6421_SPMI=m ++CONFIG_LPC_ICH=m ++CONFIG_MFD_IQS62X=m ++CONFIG_MFD_JANZ_CMODIO=m ++CONFIG_MFD_KEMPLD=m ++CONFIG_MFD_88PM800=m ++CONFIG_MFD_88PM805=m ++CONFIG_MFD_88PM860X=y ++CONFIG_MFD_MAX14577=y ++CONFIG_MFD_MAX77620=y ++CONFIG_MFD_MAX77650=m ++CONFIG_MFD_MAX77686=y ++CONFIG_MFD_MAX77693=y ++CONFIG_MFD_MAX77714=m ++CONFIG_MFD_MAX77843=y ++CONFIG_MFD_MAX8907=m ++CONFIG_MFD_MAX8925=y ++CONFIG_MFD_MAX8997=y ++CONFIG_MFD_MAX8998=y ++CONFIG_MFD_MT6360=m ++CONFIG_MFD_MT6370=m ++CONFIG_MFD_MT6397=m ++CONFIG_MFD_MENF21BMC=m ++CONFIG_MFD_OCELOT=m ++CONFIG_EZX_PCAP=y ++CONFIG_MFD_CPCAP=m ++CONFIG_MFD_VIPERBOARD=m ++CONFIG_MFD_NTXEC=m ++CONFIG_MFD_RETU=m ++CONFIG_MFD_PCF50633=m ++CONFIG_PCF50633_ADC=m ++CONFIG_PCF50633_GPIO=m ++CONFIG_MFD_SY7636A=m ++CONFIG_MFD_RT4831=m ++CONFIG_MFD_RT5033=m ++CONFIG_MFD_RT5120=m ++CONFIG_MFD_RC5T583=y ++CONFIG_MFD_RN5T618=m ++CONFIG_MFD_SEC_CORE=y ++CONFIG_MFD_SI476X_CORE=m ++CONFIG_MFD_SM501=m ++CONFIG_MFD_SM501_GPIO=y ++CONFIG_MFD_SKY81452=m ++CONFIG_MFD_STMPE=y ++CONFIG_STMPE_SPI=y ++CONFIG_MFD_LP3943=m ++CONFIG_MFD_LP8788=y ++CONFIG_MFD_TI_LMU=m ++CONFIG_MFD_PALMAS=y ++CONFIG_TPS6105X=m ++CONFIG_TPS65010=m ++CONFIG_TPS6507X=m ++CONFIG_MFD_TPS65086=m ++CONFIG_MFD_TPS65090=y ++CONFIG_MFD_TPS65217=m ++CONFIG_MFD_TI_LP873X=m ++CONFIG_MFD_TI_LP87565=m ++CONFIG_MFD_TPS65218=m ++CONFIG_MFD_TPS6586X=y ++CONFIG_MFD_TPS65910=y ++CONFIG_MFD_TPS65912_I2C=y ++CONFIG_MFD_TPS65912_SPI=y ++CONFIG_TWL4030_CORE=y ++CONFIG_TWL6040_CORE=y ++CONFIG_MFD_LM3533=m ++CONFIG_MFD_TC3589X=y ++CONFIG_MFD_TQMX86=m ++CONFIG_MFD_VX855=m ++CONFIG_MFD_LOCHNAGAR=y ++CONFIG_MFD_ARIZONA_I2C=m ++CONFIG_MFD_ARIZONA_SPI=m ++CONFIG_MFD_CS47L24=y ++CONFIG_MFD_WM5102=y ++CONFIG_MFD_WM5110=y ++CONFIG_MFD_WM8997=y ++CONFIG_MFD_WM8998=y ++CONFIG_MFD_WM8400=y ++CONFIG_MFD_WM831X_I2C=y ++CONFIG_MFD_WM831X_SPI=y ++CONFIG_MFD_WM8350_I2C=y ++CONFIG_MFD_WM8994=m ++CONFIG_MFD_ROHM_BD718XX=m ++CONFIG_MFD_ROHM_BD71828=m ++CONFIG_MFD_ROHM_BD957XMUF=m 
++CONFIG_MFD_STPMIC1=m ++CONFIG_MFD_WCD934X=m ++CONFIG_MFD_ATC260X_I2C=m ++CONFIG_MFD_QCOM_PM8008=m ++CONFIG_RAVE_SP_CORE=m ++CONFIG_MFD_RSMU_I2C=m ++CONFIG_MFD_RSMU_SPI=m ++CONFIG_REGULATOR_FIXED_VOLTAGE=y ++CONFIG_REGULATOR_VIRTUAL_CONSUMER=m ++CONFIG_REGULATOR_USERSPACE_CONSUMER=m ++CONFIG_REGULATOR_88PG86X=m ++CONFIG_REGULATOR_88PM800=m ++CONFIG_REGULATOR_88PM8607=m ++CONFIG_REGULATOR_ACT8865=m ++CONFIG_REGULATOR_ACT8945A=m ++CONFIG_REGULATOR_AD5398=m ++CONFIG_REGULATOR_AAT2870=m ++CONFIG_REGULATOR_ARIZONA_LDO1=m ++CONFIG_REGULATOR_ARIZONA_MICSUPP=m ++CONFIG_REGULATOR_AS3711=m ++CONFIG_REGULATOR_AS3722=m ++CONFIG_REGULATOR_ATC260X=m ++CONFIG_REGULATOR_AXP20X=m ++CONFIG_REGULATOR_BCM590XX=m ++CONFIG_REGULATOR_BD71815=m ++CONFIG_REGULATOR_BD71828=m ++CONFIG_REGULATOR_BD718XX=m ++CONFIG_REGULATOR_BD9571MWV=m ++CONFIG_REGULATOR_BD957XMUF=m ++CONFIG_REGULATOR_CPCAP=m ++CONFIG_REGULATOR_DA903X=m ++CONFIG_REGULATOR_DA9052=m ++CONFIG_REGULATOR_DA9055=m ++CONFIG_REGULATOR_DA9062=m ++CONFIG_REGULATOR_DA9063=m ++CONFIG_REGULATOR_DA9121=m ++CONFIG_REGULATOR_DA9210=m ++CONFIG_REGULATOR_DA9211=m ++CONFIG_REGULATOR_FAN53555=m ++CONFIG_REGULATOR_FAN53880=m ++CONFIG_REGULATOR_GPIO=m ++CONFIG_REGULATOR_HI6421=m ++CONFIG_REGULATOR_HI6421V530=m ++CONFIG_REGULATOR_HI6421V600=m ++CONFIG_REGULATOR_ISL9305=m ++CONFIG_REGULATOR_ISL6271A=m ++CONFIG_REGULATOR_LM363X=m ++CONFIG_REGULATOR_LOCHNAGAR=m ++CONFIG_REGULATOR_LP3971=m ++CONFIG_REGULATOR_LP3972=m ++CONFIG_REGULATOR_LP872X=m ++CONFIG_REGULATOR_LP873X=m ++CONFIG_REGULATOR_LP8755=m ++CONFIG_REGULATOR_LP87565=m ++CONFIG_REGULATOR_LP8788=m ++CONFIG_REGULATOR_LTC3589=m ++CONFIG_REGULATOR_LTC3676=m ++CONFIG_REGULATOR_MAX14577=m ++CONFIG_REGULATOR_MAX1586=m ++CONFIG_REGULATOR_MAX77620=m ++CONFIG_REGULATOR_MAX77650=m ++CONFIG_REGULATOR_MAX8649=m ++CONFIG_REGULATOR_MAX8660=m ++CONFIG_REGULATOR_MAX8893=m ++CONFIG_REGULATOR_MAX8907=m ++CONFIG_REGULATOR_MAX8925=m ++CONFIG_REGULATOR_MAX8952=m ++CONFIG_REGULATOR_MAX8973=m ++CONFIG_REGULATOR_MAX8997=m ++CONFIG_REGULATOR_MAX8998=m ++CONFIG_REGULATOR_MAX20086=m ++CONFIG_REGULATOR_MAX77686=m ++CONFIG_REGULATOR_MAX77693=m ++CONFIG_REGULATOR_MAX77802=m ++CONFIG_REGULATOR_MAX77826=m ++CONFIG_REGULATOR_MC13783=m ++CONFIG_REGULATOR_MC13892=m ++CONFIG_REGULATOR_MCP16502=m ++CONFIG_REGULATOR_MP5416=m ++CONFIG_REGULATOR_MP8859=m ++CONFIG_REGULATOR_MP886X=m ++CONFIG_REGULATOR_MPQ7920=m ++CONFIG_REGULATOR_MT6311=m ++CONFIG_REGULATOR_MT6315=m ++CONFIG_REGULATOR_MT6323=m ++CONFIG_REGULATOR_MT6331=m ++CONFIG_REGULATOR_MT6332=m ++CONFIG_REGULATOR_MT6358=m ++CONFIG_REGULATOR_MT6359=m ++CONFIG_REGULATOR_MT6360=m ++CONFIG_REGULATOR_MT6370=m ++CONFIG_REGULATOR_MT6397=m ++CONFIG_REGULATOR_PALMAS=m ++CONFIG_REGULATOR_PCA9450=m ++CONFIG_REGULATOR_PCAP=m ++CONFIG_REGULATOR_PCF50633=m ++CONFIG_REGULATOR_PF8X00=m ++CONFIG_REGULATOR_PFUZE100=m ++CONFIG_REGULATOR_PV88060=m ++CONFIG_REGULATOR_PV88080=m ++CONFIG_REGULATOR_PV88090=m ++CONFIG_REGULATOR_QCOM_SPMI=m ++CONFIG_REGULATOR_QCOM_USB_VBUS=m ++CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY=m ++CONFIG_REGULATOR_RC5T583=m ++CONFIG_REGULATOR_RN5T618=m ++CONFIG_REGULATOR_RT4801=m ++CONFIG_REGULATOR_RT4831=m ++CONFIG_REGULATOR_RT5033=m ++CONFIG_REGULATOR_RT5120=m ++CONFIG_REGULATOR_RT5190A=m ++CONFIG_REGULATOR_RT5759=m ++CONFIG_REGULATOR_RT6160=m ++CONFIG_REGULATOR_RT6245=m ++CONFIG_REGULATOR_RTQ2134=m ++CONFIG_REGULATOR_RTMV20=m ++CONFIG_REGULATOR_RTQ6752=m ++CONFIG_REGULATOR_S2MPA01=m ++CONFIG_REGULATOR_S2MPS11=m ++CONFIG_REGULATOR_S5M8767=m ++CONFIG_REGULATOR_SKY81452=m 
++CONFIG_REGULATOR_SLG51000=m ++CONFIG_REGULATOR_STPMIC1=m ++CONFIG_REGULATOR_SY7636A=m ++CONFIG_REGULATOR_SY8106A=m ++CONFIG_REGULATOR_SY8824X=m ++CONFIG_REGULATOR_SY8827N=m ++CONFIG_REGULATOR_TPS51632=m ++CONFIG_REGULATOR_TPS62360=m ++CONFIG_REGULATOR_TPS6286X=m ++CONFIG_REGULATOR_TPS65023=m ++CONFIG_REGULATOR_TPS6507X=m ++CONFIG_REGULATOR_TPS65086=m ++CONFIG_REGULATOR_TPS65090=m ++CONFIG_REGULATOR_TPS65132=m ++CONFIG_REGULATOR_TPS65217=m ++CONFIG_REGULATOR_TPS65218=m ++CONFIG_REGULATOR_TPS6524X=m ++CONFIG_REGULATOR_TPS6586X=m ++CONFIG_REGULATOR_TPS65910=m ++CONFIG_REGULATOR_TPS65912=m ++CONFIG_REGULATOR_TWL4030=m ++CONFIG_REGULATOR_VCTRL=m ++CONFIG_REGULATOR_WM831X=m ++CONFIG_REGULATOR_WM8350=m ++CONFIG_REGULATOR_WM8400=m ++CONFIG_REGULATOR_WM8994=m ++CONFIG_REGULATOR_QCOM_LABIBB=m ++CONFIG_RC_CORE=m ++CONFIG_LIRC=y ++CONFIG_RC_DECODERS=y ++CONFIG_IR_IMON_DECODER=m ++CONFIG_IR_JVC_DECODER=m ++CONFIG_IR_MCE_KBD_DECODER=m ++CONFIG_IR_NEC_DECODER=m ++CONFIG_IR_RC5_DECODER=m ++CONFIG_IR_RC6_DECODER=m ++CONFIG_IR_RCMM_DECODER=m ++CONFIG_IR_SANYO_DECODER=m ++CONFIG_IR_SHARP_DECODER=m ++CONFIG_IR_SONY_DECODER=m ++CONFIG_IR_XMP_DECODER=m ++CONFIG_RC_DEVICES=y ++CONFIG_IR_GPIO_CIR=m ++CONFIG_IR_GPIO_TX=m ++CONFIG_IR_HIX5HD2=m ++CONFIG_IR_IGORPLUGUSB=m ++CONFIG_IR_IGUANA=m ++CONFIG_IR_IMON=m ++CONFIG_IR_IMON_RAW=m ++CONFIG_IR_MCEUSB=m ++CONFIG_IR_REDRAT3=m ++CONFIG_IR_SERIAL=m ++CONFIG_IR_SERIAL_TRANSMITTER=y ++CONFIG_IR_SPI=m ++CONFIG_IR_STREAMZAP=m ++CONFIG_IR_TOY=m ++CONFIG_IR_TTUSBIR=m ++CONFIG_RC_ATI_REMOTE=m ++CONFIG_RC_LOOPBACK=m ++CONFIG_RC_XBOX_DVD=m ++CONFIG_MEDIA_CEC_RC=y ++CONFIG_MEDIA_CEC_SUPPORT=y ++CONFIG_CEC_CH7322=m ++CONFIG_USB_PULSE8_CEC=m ++CONFIG_USB_RAINSHADOW_CEC=m ++CONFIG_MEDIA_SUPPORT=m ++CONFIG_MEDIA_SUPPORT_FILTER=y ++CONFIG_MEDIA_CAMERA_SUPPORT=y ++CONFIG_MEDIA_ANALOG_TV_SUPPORT=y ++CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y ++CONFIG_MEDIA_RADIO_SUPPORT=y ++CONFIG_MEDIA_SDR_SUPPORT=y ++CONFIG_MEDIA_PLATFORM_SUPPORT=y ++CONFIG_MEDIA_TEST_SUPPORT=y ++CONFIG_V4L2_FLASH_LED_CLASS=m ++CONFIG_DVB_MAX_ADAPTERS=8 ++CONFIG_MEDIA_USB_SUPPORT=y ++CONFIG_USB_GSPCA=m ++CONFIG_USB_GSPCA_BENQ=m ++CONFIG_USB_GSPCA_CONEX=m ++CONFIG_USB_GSPCA_CPIA1=m ++CONFIG_USB_GSPCA_DTCS033=m ++CONFIG_USB_GSPCA_ETOMS=m ++CONFIG_USB_GSPCA_FINEPIX=m ++CONFIG_USB_GSPCA_JEILINJ=m ++CONFIG_USB_GSPCA_JL2005BCD=m ++CONFIG_USB_GSPCA_KINECT=m ++CONFIG_USB_GSPCA_KONICA=m ++CONFIG_USB_GSPCA_MARS=m ++CONFIG_USB_GSPCA_MR97310A=m ++CONFIG_USB_GSPCA_NW80X=m ++CONFIG_USB_GSPCA_OV519=m ++CONFIG_USB_GSPCA_OV534=m ++CONFIG_USB_GSPCA_OV534_9=m ++CONFIG_USB_GSPCA_PAC207=m ++CONFIG_USB_GSPCA_PAC7302=m ++CONFIG_USB_GSPCA_PAC7311=m ++CONFIG_USB_GSPCA_SE401=m ++CONFIG_USB_GSPCA_SN9C2028=m ++CONFIG_USB_GSPCA_SN9C20X=m ++CONFIG_USB_GSPCA_SONIXB=m ++CONFIG_USB_GSPCA_SONIXJ=m ++CONFIG_USB_GSPCA_SPCA1528=m ++CONFIG_USB_GSPCA_SPCA500=m ++CONFIG_USB_GSPCA_SPCA501=m ++CONFIG_USB_GSPCA_SPCA505=m ++CONFIG_USB_GSPCA_SPCA506=m ++CONFIG_USB_GSPCA_SPCA508=m ++CONFIG_USB_GSPCA_SPCA561=m ++CONFIG_USB_GSPCA_SQ905=m ++CONFIG_USB_GSPCA_SQ905C=m ++CONFIG_USB_GSPCA_SQ930X=m ++CONFIG_USB_GSPCA_STK014=m ++CONFIG_USB_GSPCA_STK1135=m ++CONFIG_USB_GSPCA_STV0680=m ++CONFIG_USB_GSPCA_SUNPLUS=m ++CONFIG_USB_GSPCA_T613=m ++CONFIG_USB_GSPCA_TOPRO=m ++CONFIG_USB_GSPCA_TOUPTEK=m ++CONFIG_USB_GSPCA_TV8532=m ++CONFIG_USB_GSPCA_VC032X=m ++CONFIG_USB_GSPCA_VICAM=m ++CONFIG_USB_GSPCA_XIRLINK_CIT=m ++CONFIG_USB_GSPCA_ZC3XX=m ++CONFIG_USB_GL860=m ++CONFIG_USB_M5602=m ++CONFIG_USB_STV06XX=m ++CONFIG_USB_PWC=m ++CONFIG_USB_S2255=m ++CONFIG_VIDEO_USBTV=m 
++CONFIG_USB_VIDEO_CLASS=m ++CONFIG_VIDEO_GO7007=m ++CONFIG_VIDEO_GO7007_USB=m ++CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m ++CONFIG_VIDEO_HDPVR=m ++CONFIG_VIDEO_PVRUSB2=m ++CONFIG_VIDEO_AU0828=m ++CONFIG_VIDEO_AU0828_RC=y ++CONFIG_VIDEO_CX231XX=m ++CONFIG_VIDEO_CX231XX_ALSA=m ++CONFIG_VIDEO_CX231XX_DVB=m ++CONFIG_DVB_AS102=m ++CONFIG_DVB_B2C2_FLEXCOP_USB=m ++CONFIG_DVB_USB_V2=m ++CONFIG_DVB_USB_AF9015=m ++CONFIG_DVB_USB_AF9035=m ++CONFIG_DVB_USB_ANYSEE=m ++CONFIG_DVB_USB_AU6610=m ++CONFIG_DVB_USB_AZ6007=m ++CONFIG_DVB_USB_CE6230=m ++CONFIG_DVB_USB_DVBSKY=m ++CONFIG_DVB_USB_EC168=m ++CONFIG_DVB_USB_GL861=m ++CONFIG_DVB_USB_LME2510=m ++CONFIG_DVB_USB_MXL111SF=m ++CONFIG_DVB_USB_RTL28XXU=m ++CONFIG_DVB_USB_ZD1301=m ++CONFIG_DVB_USB=m ++CONFIG_DVB_USB_A800=m ++CONFIG_DVB_USB_AF9005=m ++CONFIG_DVB_USB_AF9005_REMOTE=m ++CONFIG_DVB_USB_AZ6027=m ++CONFIG_DVB_USB_CINERGY_T2=m ++CONFIG_DVB_USB_CXUSB=m ++CONFIG_DVB_USB_CXUSB_ANALOG=y ++CONFIG_DVB_USB_DIB0700=m ++CONFIG_DVB_USB_DIBUSB_MB=m ++CONFIG_DVB_USB_DIBUSB_MC=m ++CONFIG_DVB_USB_DIGITV=m ++CONFIG_DVB_USB_DTT200U=m ++CONFIG_DVB_USB_DTV5100=m ++CONFIG_DVB_USB_DW2102=m ++CONFIG_DVB_USB_GP8PSK=m ++CONFIG_DVB_USB_M920X=m ++CONFIG_DVB_USB_NOVA_T_USB2=m ++CONFIG_DVB_USB_OPERA1=m ++CONFIG_DVB_USB_PCTV452E=m ++CONFIG_DVB_USB_TECHNISAT_USB2=m ++CONFIG_DVB_USB_TTUSB2=m ++CONFIG_DVB_USB_UMT_010=m ++CONFIG_DVB_USB_VP702X=m ++CONFIG_DVB_USB_VP7045=m ++CONFIG_SMS_USB_DRV=m ++CONFIG_DVB_TTUSB_BUDGET=m ++CONFIG_DVB_TTUSB_DEC=m ++CONFIG_VIDEO_EM28XX=m ++CONFIG_VIDEO_EM28XX_V4L2=m ++CONFIG_VIDEO_EM28XX_ALSA=m ++CONFIG_VIDEO_EM28XX_DVB=m ++CONFIG_USB_AIRSPY=m ++CONFIG_USB_HACKRF=m ++CONFIG_USB_MSI2500=m ++CONFIG_MEDIA_PCI_SUPPORT=y ++CONFIG_VIDEO_SOLO6X10=m ++CONFIG_VIDEO_TW5864=m ++CONFIG_VIDEO_TW68=m ++CONFIG_VIDEO_TW686X=m ++CONFIG_VIDEO_DT3155=m ++CONFIG_VIDEO_IVTV=m ++CONFIG_VIDEO_IVTV_ALSA=m ++CONFIG_VIDEO_FB_IVTV=m ++CONFIG_VIDEO_BT848=m ++CONFIG_DVB_BT8XX=m ++CONFIG_VIDEO_CX18=m ++CONFIG_VIDEO_CX18_ALSA=m ++CONFIG_VIDEO_CX23885=m ++CONFIG_MEDIA_ALTERA_CI=m ++CONFIG_VIDEO_CX25821=m ++CONFIG_VIDEO_CX25821_ALSA=m ++CONFIG_VIDEO_CX88=m ++CONFIG_VIDEO_CX88_ALSA=m ++CONFIG_VIDEO_CX88_BLACKBIRD=m ++CONFIG_VIDEO_CX88_DVB=m ++CONFIG_VIDEO_SAA7134=m ++CONFIG_VIDEO_SAA7134_ALSA=m ++CONFIG_VIDEO_SAA7134_DVB=m ++CONFIG_VIDEO_SAA7134_GO7007=m ++CONFIG_VIDEO_SAA7164=m ++CONFIG_DVB_B2C2_FLEXCOP_PCI=m ++CONFIG_DVB_DDBRIDGE=m ++CONFIG_DVB_DM1105=m ++CONFIG_MANTIS_CORE=m ++CONFIG_DVB_MANTIS=m ++CONFIG_DVB_HOPPER=m ++CONFIG_DVB_NETUP_UNIDVB=m ++CONFIG_DVB_NGENE=m ++CONFIG_DVB_PLUTO2=m ++CONFIG_DVB_PT1=m ++CONFIG_DVB_PT3=m ++CONFIG_DVB_SMIPCIE=m ++CONFIG_RADIO_MAXIRADIO=m ++CONFIG_RADIO_SAA7706H=m ++CONFIG_RADIO_SHARK=m ++CONFIG_RADIO_SHARK2=m ++CONFIG_RADIO_SI4713=m ++CONFIG_RADIO_SI476X=m ++CONFIG_RADIO_TEA5764=m ++CONFIG_RADIO_TEF6862=m ++CONFIG_RADIO_WL1273=m ++CONFIG_USB_DSBR=m ++CONFIG_USB_KEENE=m ++CONFIG_USB_MA901=m ++CONFIG_USB_MR800=m ++CONFIG_USB_RAREMONO=m ++CONFIG_RADIO_SI470X=m ++CONFIG_USB_SI470X=m ++CONFIG_I2C_SI470X=m ++CONFIG_USB_SI4713=m ++CONFIG_PLATFORM_SI4713=m ++CONFIG_RADIO_WL128X=m ++CONFIG_V4L_PLATFORM_DRIVERS=y ++CONFIG_SDR_PLATFORM_DRIVERS=y ++CONFIG_DVB_PLATFORM_DRIVERS=y ++CONFIG_V4L_MEM2MEM_DRIVERS=y ++CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m ++CONFIG_VIDEO_MUX=m ++CONFIG_VIDEO_CADENCE_CSI2RX=m ++CONFIG_VIDEO_CADENCE_CSI2TX=m ++CONFIG_VIDEO_CAFE_CCIC=m ++CONFIG_VIDEO_XILINX=m ++CONFIG_VIDEO_XILINX_CSI2RXSS=m ++CONFIG_VIDEO_XILINX_TPG=m ++CONFIG_SMS_SDIO_DRV=m ++CONFIG_V4L_TEST_DRIVERS=y ++CONFIG_VIDEO_VIM2M=m ++CONFIG_VIDEO_VICODEC=m ++CONFIG_VIDEO_VIMC=m 
++CONFIG_VIDEO_VIVID=m ++CONFIG_VIDEO_VIVID_CEC=y ++CONFIG_DVB_FIREDTV=m ++CONFIG_SMS_SIANO_DEBUGFS=y ++CONFIG_VIDEO_AR0521=m ++CONFIG_VIDEO_HI556=m ++CONFIG_VIDEO_HI846=m ++CONFIG_VIDEO_HI847=m ++CONFIG_VIDEO_IMX208=m ++CONFIG_VIDEO_IMX214=m ++CONFIG_VIDEO_IMX219=m ++CONFIG_VIDEO_IMX258=m ++CONFIG_VIDEO_IMX274=m ++CONFIG_VIDEO_IMX290=m ++CONFIG_VIDEO_IMX319=m ++CONFIG_VIDEO_IMX334=m ++CONFIG_VIDEO_IMX335=m ++CONFIG_VIDEO_IMX355=m ++CONFIG_VIDEO_IMX412=m ++CONFIG_VIDEO_MT9M001=m ++CONFIG_VIDEO_MT9M111=m ++CONFIG_VIDEO_MT9P031=m ++CONFIG_VIDEO_MT9T112=m ++CONFIG_VIDEO_MT9V032=m ++CONFIG_VIDEO_MT9V111=m ++CONFIG_VIDEO_OG01A1B=m ++CONFIG_VIDEO_OV02A10=m ++CONFIG_VIDEO_OV08D10=m ++CONFIG_VIDEO_OV13858=m ++CONFIG_VIDEO_OV13B10=m ++CONFIG_VIDEO_OV2659=m ++CONFIG_VIDEO_OV2680=m ++CONFIG_VIDEO_OV2685=m ++CONFIG_VIDEO_OV5640=m ++CONFIG_VIDEO_OV5645=m ++CONFIG_VIDEO_OV5647=m ++CONFIG_VIDEO_OV5648=m ++CONFIG_VIDEO_OV5670=m ++CONFIG_VIDEO_OV5675=m ++CONFIG_VIDEO_OV5693=m ++CONFIG_VIDEO_OV5695=m ++CONFIG_VIDEO_OV6650=m ++CONFIG_VIDEO_OV7251=m ++CONFIG_VIDEO_OV772X=m ++CONFIG_VIDEO_OV7740=m ++CONFIG_VIDEO_OV8856=m ++CONFIG_VIDEO_OV8865=m ++CONFIG_VIDEO_OV9282=m ++CONFIG_VIDEO_OV9640=m ++CONFIG_VIDEO_OV9650=m ++CONFIG_VIDEO_RDACM20=m ++CONFIG_VIDEO_RDACM21=m ++CONFIG_VIDEO_RJ54N1=m ++CONFIG_VIDEO_S5C73M3=m ++CONFIG_VIDEO_S5K5BAF=m ++CONFIG_VIDEO_S5K6A3=m ++CONFIG_VIDEO_CCS=m ++CONFIG_VIDEO_ET8EK8=m ++CONFIG_VIDEO_AD5820=m ++CONFIG_VIDEO_AK7375=m ++CONFIG_VIDEO_DW9714=m ++CONFIG_VIDEO_DW9768=m ++CONFIG_VIDEO_DW9807_VCM=m ++CONFIG_VIDEO_ADP1653=m ++CONFIG_VIDEO_LM3560=m ++CONFIG_VIDEO_LM3646=m ++CONFIG_VIDEO_TDA1997X=m ++CONFIG_VIDEO_TDA9840=m ++CONFIG_VIDEO_TEA6415C=m ++CONFIG_VIDEO_TEA6420=m ++CONFIG_VIDEO_TLV320AIC23B=m ++CONFIG_VIDEO_ADV7180=m ++CONFIG_VIDEO_ADV7183=m ++CONFIG_VIDEO_ADV748X=m ++CONFIG_VIDEO_ADV7604=m ++CONFIG_VIDEO_ADV7604_CEC=y ++CONFIG_VIDEO_ADV7842=m ++CONFIG_VIDEO_ADV7842_CEC=y ++CONFIG_VIDEO_BT819=m ++CONFIG_VIDEO_BT856=m ++CONFIG_VIDEO_BT866=m ++CONFIG_VIDEO_ISL7998X=m ++CONFIG_VIDEO_KS0127=m ++CONFIG_VIDEO_MAX9286=m ++CONFIG_VIDEO_ML86V7667=m ++CONFIG_VIDEO_SAA7110=m ++CONFIG_VIDEO_TC358743=m ++CONFIG_VIDEO_TC358743_CEC=y ++CONFIG_VIDEO_TVP514X=m ++CONFIG_VIDEO_TVP7002=m ++CONFIG_VIDEO_TW9910=m ++CONFIG_VIDEO_VPX3220=m ++CONFIG_VIDEO_ADV7170=m ++CONFIG_VIDEO_ADV7175=m ++CONFIG_VIDEO_ADV7343=m ++CONFIG_VIDEO_ADV7393=m ++CONFIG_VIDEO_AK881X=m ++CONFIG_VIDEO_SAA7185=m ++CONFIG_VIDEO_THS8200=m ++CONFIG_SDR_MAX2175=m ++CONFIG_VIDEO_I2C=m ++CONFIG_VIDEO_ST_MIPID02=m ++CONFIG_VIDEO_THS7303=m ++CONFIG_CXD2880_SPI_DRV=m ++CONFIG_VIDEO_GS1662=m ++CONFIG_DVB_TDA8261=m ++CONFIG_DVB_TUA6100=m ++CONFIG_DVB_VES1X93=m ++CONFIG_DVB_DIB9000=m ++CONFIG_DVB_L64781=m ++CONFIG_DVB_S5H1432=m ++CONFIG_DVB_CXD2880=m ++CONFIG_DVB_MN88443X=m ++CONFIG_DVB_LGS8GL5=m ++CONFIG_DVB_LNBH29=m ++CONFIG_DVB_DUMMY_FE=m ++CONFIG_HD44780=m ++CONFIG_KS0108=m ++CONFIG_IMG_ASCII_LCD=m ++CONFIG_HT16K33=m ++CONFIG_LCD2S=m ++CONFIG_PANEL=m ++CONFIG_DRM=y ++CONFIG_DRM_LOAD_EDID_FIRMWARE=y ++CONFIG_DRM_DP_AUX_CHARDEV=y ++CONFIG_DRM_DP_CEC=y ++CONFIG_DRM_I2C_NXP_TDA998X=m ++CONFIG_DRM_I2C_NXP_TDA9950=m ++CONFIG_DRM_KOMEDA=m ++CONFIG_DRM_RADEON=m ++CONFIG_DRM_AMDGPU=m ++CONFIG_DRM_AMDGPU_SI=y ++CONFIG_DRM_AMDGPU_CIK=y ++CONFIG_DRM_AMDGPU_USERPTR=y ++CONFIG_DRM_AMD_ACP=y ++CONFIG_DRM_AMD_DC_SI=y ++CONFIG_DRM_NOUVEAU=m ++CONFIG_DRM_VGEM=m ++CONFIG_DRM_VKMS=m ++CONFIG_DRM_UDL=m ++CONFIG_DRM_AST=m ++CONFIG_DRM_MGAG200=m ++CONFIG_DRM_QXL=m ++CONFIG_DRM_VIRTIO_GPU=m ++CONFIG_DRM_PANEL_ABT_Y030XX067A=m 
++CONFIG_DRM_PANEL_ARM_VERSATILE=m ++CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596=m ++CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0=m ++CONFIG_DRM_PANEL_BOE_HIMAX8279D=m ++CONFIG_DRM_PANEL_BOE_TV101WUM_NL6=m ++CONFIG_DRM_PANEL_DSI_CM=m ++CONFIG_DRM_PANEL_LVDS=m ++CONFIG_DRM_PANEL_SIMPLE=m ++CONFIG_DRM_PANEL_EDP=m ++CONFIG_DRM_PANEL_EBBG_FT8719=m ++CONFIG_DRM_PANEL_ELIDA_KD35T133=m ++CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02=m ++CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D=m ++CONFIG_DRM_PANEL_ILITEK_IL9322=m ++CONFIG_DRM_PANEL_ILITEK_ILI9341=m ++CONFIG_DRM_PANEL_ILITEK_ILI9881C=m ++CONFIG_DRM_PANEL_INNOLUX_EJ030NA=m ++CONFIG_DRM_PANEL_INNOLUX_P079ZCA=m ++CONFIG_DRM_PANEL_JDI_LT070ME05000=m ++CONFIG_DRM_PANEL_JDI_R63452=m ++CONFIG_DRM_PANEL_KHADAS_TS050=m ++CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04=m ++CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W=m ++CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829=m ++CONFIG_DRM_PANEL_SAMSUNG_LD9040=m ++CONFIG_DRM_PANEL_LG_LB035Q02=m ++CONFIG_DRM_PANEL_LG_LG4573=m ++CONFIG_DRM_PANEL_NEC_NL8048HL11=m ++CONFIG_DRM_PANEL_NEWVISION_NV3052C=m ++CONFIG_DRM_PANEL_NOVATEK_NT35510=m ++CONFIG_DRM_PANEL_NOVATEK_NT35560=m ++CONFIG_DRM_PANEL_NOVATEK_NT35950=m ++CONFIG_DRM_PANEL_NOVATEK_NT36672A=m ++CONFIG_DRM_PANEL_NOVATEK_NT39016=m ++CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m ++CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO=m ++CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m ++CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS=m ++CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00=m ++CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m ++CONFIG_DRM_PANEL_RAYDIUM_RM67191=m ++CONFIG_DRM_PANEL_RAYDIUM_RM68200=m ++CONFIG_DRM_PANEL_RONBO_RB070D30=m ++CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20=m ++CONFIG_DRM_PANEL_SAMSUNG_DB7430=m ++CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=m ++CONFIG_DRM_PANEL_SAMSUNG_S6D27A1=m ++CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2=m ++CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m ++CONFIG_DRM_PANEL_SAMSUNG_S6E63M0=m ++CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=m ++CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01=m ++CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m ++CONFIG_DRM_PANEL_SAMSUNG_SOFEF00=m ++CONFIG_DRM_PANEL_SEIKO_43WVF1G=m ++CONFIG_DRM_PANEL_SHARP_LQ101R1SX01=m ++CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m ++CONFIG_DRM_PANEL_SHARP_LS043T1LE01=m ++CONFIG_DRM_PANEL_SHARP_LS060T1SX01=m ++CONFIG_DRM_PANEL_SITRONIX_ST7701=m ++CONFIG_DRM_PANEL_SITRONIX_ST7703=m ++CONFIG_DRM_PANEL_SITRONIX_ST7789V=m ++CONFIG_DRM_PANEL_SONY_ACX565AKM=m ++CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521=m ++CONFIG_DRM_PANEL_TDO_TL070WSH30=m ++CONFIG_DRM_PANEL_TPO_TD028TTEC1=m ++CONFIG_DRM_PANEL_TPO_TD043MTEA1=m ++CONFIG_DRM_PANEL_TPO_TPG110=m ++CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m ++CONFIG_DRM_PANEL_VISIONOX_RM69299=m ++CONFIG_DRM_PANEL_WIDECHIPS_WS2401=m ++CONFIG_DRM_PANEL_XINPENG_XPP055C272=m ++CONFIG_DRM_CHIPONE_ICN6211=m ++CONFIG_DRM_CHRONTEL_CH7033=m ++CONFIG_DRM_DISPLAY_CONNECTOR=m ++CONFIG_DRM_ITE_IT6505=m ++CONFIG_DRM_LONTIUM_LT8912B=m ++CONFIG_DRM_LONTIUM_LT9211=m ++CONFIG_DRM_LONTIUM_LT9611=m ++CONFIG_DRM_LONTIUM_LT9611UXC=m ++CONFIG_DRM_ITE_IT66121=m ++CONFIG_DRM_LVDS_CODEC=m ++CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW=m ++CONFIG_DRM_NWL_MIPI_DSI=m ++CONFIG_DRM_NXP_PTN3460=m ++CONFIG_DRM_PARADE_PS8622=m ++CONFIG_DRM_PARADE_PS8640=m ++CONFIG_DRM_SIL_SII8620=m ++CONFIG_DRM_SII902X=m ++CONFIG_DRM_SII9234=m ++CONFIG_DRM_SIMPLE_BRIDGE=m ++CONFIG_DRM_THINE_THC63LVD1024=m ++CONFIG_DRM_TOSHIBA_TC358762=m ++CONFIG_DRM_TOSHIBA_TC358764=m ++CONFIG_DRM_TOSHIBA_TC358767=m ++CONFIG_DRM_TOSHIBA_TC358768=m ++CONFIG_DRM_TOSHIBA_TC358775=m ++CONFIG_DRM_TI_DLPC3433=m ++CONFIG_DRM_TI_TFP410=m ++CONFIG_DRM_TI_SN65DSI83=m 
++CONFIG_DRM_TI_SN65DSI86=m ++CONFIG_DRM_TI_TPD12S015=m ++CONFIG_DRM_ANALOGIX_ANX6345=m ++CONFIG_DRM_ANALOGIX_ANX78XX=m ++CONFIG_DRM_ANALOGIX_ANX7625=m ++CONFIG_DRM_I2C_ADV7511=m ++CONFIG_DRM_I2C_ADV7511_AUDIO=y ++CONFIG_DRM_CDNS_DSI=m ++CONFIG_DRM_CDNS_MHDP8546=m ++CONFIG_DRM_LOGICVC=m ++CONFIG_DRM_ARCPGU=m ++CONFIG_DRM_BOCHS=m ++CONFIG_DRM_CIRRUS_QEMU=m ++CONFIG_DRM_GM12U320=m ++CONFIG_DRM_PANEL_MIPI_DBI=m ++CONFIG_DRM_SIMPLEDRM=y ++CONFIG_TINYDRM_HX8357D=m ++CONFIG_TINYDRM_ILI9163=m ++CONFIG_TINYDRM_ILI9225=m ++CONFIG_TINYDRM_ILI9341=m ++CONFIG_TINYDRM_ILI9486=m ++CONFIG_TINYDRM_MI0283QT=m ++CONFIG_TINYDRM_REPAPER=m ++CONFIG_TINYDRM_ST7586=m ++CONFIG_TINYDRM_ST7735R=m ++CONFIG_DRM_GUD=m ++CONFIG_DRM_SSD130X=m ++CONFIG_DRM_SSD130X_I2C=m ++CONFIG_DRM_SSD130X_SPI=m ++CONFIG_FB=y ++CONFIG_FB_CIRRUS=m ++CONFIG_FB_PM2=m ++CONFIG_FB_PM2_FIFO_DISCONNECT=y ++CONFIG_FB_CYBER2000=m ++CONFIG_FB_ASILIANT=y ++CONFIG_FB_IMSTT=y ++CONFIG_FB_UVESA=m ++CONFIG_FB_OPENCORES=m ++CONFIG_FB_S1D13XXX=m ++CONFIG_FB_NVIDIA=m ++CONFIG_FB_NVIDIA_I2C=y ++CONFIG_FB_RIVA=m ++CONFIG_FB_RIVA_I2C=y ++CONFIG_FB_I740=m ++CONFIG_FB_MATROX=m ++CONFIG_FB_MATROX_MILLENIUM=y ++CONFIG_FB_MATROX_MYSTIQUE=y ++CONFIG_FB_MATROX_G=y ++CONFIG_FB_MATROX_I2C=m ++CONFIG_FB_MATROX_MAVEN=m ++CONFIG_FB_RADEON=m ++CONFIG_FB_ATY128=m ++CONFIG_FB_ATY=m ++CONFIG_FB_ATY_CT=y ++CONFIG_FB_ATY_GX=y ++CONFIG_FB_S3=m ++CONFIG_FB_SAVAGE=m ++CONFIG_FB_SAVAGE_I2C=y ++CONFIG_FB_SIS=m ++CONFIG_FB_SIS_300=y ++CONFIG_FB_SIS_315=y ++CONFIG_FB_NEOMAGIC=m ++CONFIG_FB_KYRO=m ++CONFIG_FB_3DFX=m ++# CONFIG_FB_3DFX_I2C is not set ++CONFIG_FB_VOODOO1=m ++CONFIG_FB_VT8623=m ++CONFIG_FB_TRIDENT=m ++CONFIG_FB_ARK=m ++CONFIG_FB_PM3=m ++CONFIG_FB_CARMINE=m ++CONFIG_FB_SM501=m ++CONFIG_FB_SMSCUFX=m ++CONFIG_FB_UDL=m ++CONFIG_FB_GOLDFISH=m ++CONFIG_FB_METRONOME=m ++CONFIG_FB_MB862XX=m ++CONFIG_FB_SSD1307=m ++CONFIG_FB_SM712=m ++CONFIG_FIRMWARE_EDID=y ++CONFIG_LCD_CLASS_DEVICE=m ++CONFIG_LCD_L4F00242T03=m ++CONFIG_LCD_LMS283GF05=m ++CONFIG_LCD_LTV350QV=m ++CONFIG_LCD_ILI922X=m ++CONFIG_LCD_TDO24M=m ++CONFIG_LCD_VGG2432A4=m ++CONFIG_LCD_PLATFORM=m ++CONFIG_LCD_AMS369FG06=m ++CONFIG_LCD_LMS501KF03=m ++CONFIG_LCD_HX8357=m ++CONFIG_LCD_OTM3225A=m ++CONFIG_BACKLIGHT_KTD253=m ++CONFIG_BACKLIGHT_LM3533=m ++CONFIG_BACKLIGHT_DA903X=m ++CONFIG_BACKLIGHT_DA9052=m ++CONFIG_BACKLIGHT_MAX8925=m ++CONFIG_BACKLIGHT_MT6370=m ++CONFIG_BACKLIGHT_QCOM_WLED=m ++CONFIG_BACKLIGHT_RT4831=m ++CONFIG_BACKLIGHT_WM831X=m ++CONFIG_BACKLIGHT_ADP5520=m ++CONFIG_BACKLIGHT_ADP8860=m ++CONFIG_BACKLIGHT_ADP8870=m ++CONFIG_BACKLIGHT_88PM860X=m ++CONFIG_BACKLIGHT_PCF50633=m ++CONFIG_BACKLIGHT_AAT2870=m ++CONFIG_BACKLIGHT_LM3639=m ++CONFIG_BACKLIGHT_PANDORA=m ++CONFIG_BACKLIGHT_SKY81452=m ++CONFIG_BACKLIGHT_TPS65217=m ++CONFIG_BACKLIGHT_AS3711=m ++CONFIG_BACKLIGHT_GPIO=m ++CONFIG_BACKLIGHT_LV5207LP=m ++CONFIG_BACKLIGHT_BD6107=m ++CONFIG_BACKLIGHT_ARCXCNN=m ++CONFIG_BACKLIGHT_RAVE_SP=m ++CONFIG_BACKLIGHT_LED=m ++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y ++CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y ++CONFIG_SOUND=m ++# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set ++CONFIG_SND=m ++CONFIG_SND_OSSEMUL=y ++CONFIG_SND_MIXER_OSS=m ++CONFIG_SND_HRTIMER=m ++# CONFIG_SND_CTL_FAST_LOOKUP is not set ++CONFIG_SND_SEQUENCER=m ++CONFIG_SND_SEQ_DUMMY=m ++CONFIG_SND_DUMMY=m ++CONFIG_SND_ALOOP=m ++CONFIG_SND_VIRMIDI=m ++CONFIG_SND_MTPAV=m ++CONFIG_SND_MTS64=m ++CONFIG_SND_SERIAL_U16550=m ++CONFIG_SND_SERIAL_GENERIC=m ++CONFIG_SND_MPU401=m ++CONFIG_SND_PORTMAN2X4=m ++CONFIG_SND_AC97_POWER_SAVE=y ++CONFIG_SND_AD1889=m 
++CONFIG_SND_ATIIXP=m ++CONFIG_SND_ATIIXP_MODEM=m ++CONFIG_SND_AU8810=m ++CONFIG_SND_AU8820=m ++CONFIG_SND_AU8830=m ++CONFIG_SND_AW2=m ++CONFIG_SND_BT87X=m ++CONFIG_SND_CA0106=m ++CONFIG_SND_CMIPCI=m ++CONFIG_SND_OXYGEN=m ++CONFIG_SND_CS4281=m ++CONFIG_SND_CS46XX=m ++CONFIG_SND_CTXFI=m ++CONFIG_SND_DARLA20=m ++CONFIG_SND_GINA20=m ++CONFIG_SND_LAYLA20=m ++CONFIG_SND_DARLA24=m ++CONFIG_SND_GINA24=m ++CONFIG_SND_LAYLA24=m ++CONFIG_SND_MONA=m ++CONFIG_SND_MIA=m ++CONFIG_SND_ECHO3G=m ++CONFIG_SND_INDIGO=m ++CONFIG_SND_INDIGOIO=m ++CONFIG_SND_INDIGODJ=m ++CONFIG_SND_INDIGOIOX=m ++CONFIG_SND_INDIGODJX=m ++CONFIG_SND_ENS1370=m ++CONFIG_SND_ENS1371=m ++CONFIG_SND_FM801=m ++CONFIG_SND_FM801_TEA575X_BOOL=y ++CONFIG_SND_HDSP=m ++CONFIG_SND_HDSPM=m ++CONFIG_SND_ICE1724=m ++CONFIG_SND_INTEL8X0=m ++CONFIG_SND_INTEL8X0M=m ++CONFIG_SND_KORG1212=m ++CONFIG_SND_LOLA=m ++CONFIG_SND_LX6464ES=m ++CONFIG_SND_MIXART=m ++CONFIG_SND_NM256=m ++CONFIG_SND_PCXHR=m ++CONFIG_SND_RIPTIDE=m ++CONFIG_SND_RME32=m ++CONFIG_SND_RME96=m ++CONFIG_SND_RME9652=m ++CONFIG_SND_VIA82XX=m ++CONFIG_SND_VIA82XX_MODEM=m ++CONFIG_SND_VIRTUOSO=m ++CONFIG_SND_VX222=m ++CONFIG_SND_YMFPCI=m ++CONFIG_SND_HDA_INTEL=m ++CONFIG_SND_HDA_HWDEP=y ++CONFIG_SND_HDA_INPUT_BEEP=y ++CONFIG_SND_HDA_INPUT_BEEP_MODE=0 ++CONFIG_SND_HDA_PATCH_LOADER=y ++CONFIG_SND_HDA_CODEC_REALTEK=m ++CONFIG_SND_HDA_CODEC_ANALOG=m ++CONFIG_SND_HDA_CODEC_SIGMATEL=m ++CONFIG_SND_HDA_CODEC_VIA=m ++CONFIG_SND_HDA_CODEC_HDMI=m ++CONFIG_SND_HDA_CODEC_CIRRUS=m ++CONFIG_SND_HDA_CODEC_CS8409=m ++CONFIG_SND_HDA_CODEC_CONEXANT=m ++CONFIG_SND_HDA_CODEC_CA0110=m ++CONFIG_SND_HDA_CODEC_CA0132=m ++CONFIG_SND_HDA_CODEC_CMEDIA=m ++CONFIG_SND_HDA_CODEC_SI3054=m ++CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1 ++CONFIG_SND_USB_AUDIO=m ++CONFIG_SND_USB_UA101=m ++CONFIG_SND_USB_CAIAQ=m ++CONFIG_SND_USB_CAIAQ_INPUT=y ++CONFIG_SND_USB_6FIRE=m ++CONFIG_SND_USB_HIFACE=m ++CONFIG_SND_BCD2000=m ++CONFIG_SND_USB_POD=m ++CONFIG_SND_USB_PODHD=m ++CONFIG_SND_USB_TONEPORT=m ++CONFIG_SND_USB_VARIAX=m ++CONFIG_SND_DICE=m ++CONFIG_SND_OXFW=m ++CONFIG_SND_ISIGHT=m ++CONFIG_SND_FIREWORKS=m ++CONFIG_SND_BEBOB=m ++CONFIG_SND_FIREWIRE_DIGI00X=m ++CONFIG_SND_FIREWIRE_TASCAM=m ++CONFIG_SND_FIREWIRE_MOTU=m ++CONFIG_SND_FIREFACE=m ++CONFIG_SND_SOC=m ++CONFIG_SND_SOC_ADI=m ++CONFIG_SND_SOC_ADI_AXI_I2S=m ++CONFIG_SND_SOC_ADI_AXI_SPDIF=m ++CONFIG_SND_SOC_AMD_ACP=m ++CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m ++CONFIG_SND_AMD_ACP_CONFIG=m ++CONFIG_SND_ATMEL_SOC=m ++CONFIG_SND_SOC_MIKROE_PROTO=m ++CONFIG_SND_BCM63XX_I2S_WHISTLER=m ++CONFIG_SND_DESIGNWARE_I2S=m ++CONFIG_SND_DESIGNWARE_PCM=y ++CONFIG_SND_SOC_FSL_ASRC=m ++CONFIG_SND_SOC_FSL_SAI=m ++CONFIG_SND_SOC_FSL_MQS=m ++CONFIG_SND_SOC_FSL_AUDMIX=m ++CONFIG_SND_SOC_FSL_SSI=m ++CONFIG_SND_SOC_FSL_SPDIF=m ++CONFIG_SND_SOC_FSL_ESAI=m ++CONFIG_SND_SOC_FSL_MICFIL=m ++CONFIG_SND_SOC_FSL_EASRC=m ++CONFIG_SND_SOC_FSL_XCVR=m ++CONFIG_SND_SOC_FSL_RPMSG=m ++CONFIG_SND_SOC_IMX_AUDMUX=m ++CONFIG_SND_I2S_HI6210_I2S=m ++CONFIG_SND_SOC_IMG=y ++CONFIG_SND_SOC_IMG_I2S_IN=m ++CONFIG_SND_SOC_IMG_I2S_OUT=m ++CONFIG_SND_SOC_IMG_PARALLEL_OUT=m ++CONFIG_SND_SOC_IMG_SPDIF_IN=m ++CONFIG_SND_SOC_IMG_SPDIF_OUT=m ++CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m ++CONFIG_SND_SOC_MTK_BTCVSD=m ++CONFIG_SND_SOC_SOF_TOPLEVEL=y ++CONFIG_SND_SOC_SOF_PCI=m ++CONFIG_SND_SOC_SOF_OF=m ++CONFIG_SND_SOC_XILINX_I2S=m ++CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m ++CONFIG_SND_SOC_XILINX_SPDIF=m ++CONFIG_SND_SOC_XTFPGA_I2S=m ++CONFIG_SND_SOC_AC97_CODEC=m ++CONFIG_SND_SOC_ADAU1372_I2C=m ++CONFIG_SND_SOC_ADAU1372_SPI=m 
++CONFIG_SND_SOC_ADAU1701=m ++CONFIG_SND_SOC_ADAU1761_I2C=m ++CONFIG_SND_SOC_ADAU1761_SPI=m ++CONFIG_SND_SOC_ADAU7002=m ++CONFIG_SND_SOC_ADAU7118_HW=m ++CONFIG_SND_SOC_ADAU7118_I2C=m ++CONFIG_SND_SOC_AK4104=m ++CONFIG_SND_SOC_AK4118=m ++CONFIG_SND_SOC_AK4375=m ++CONFIG_SND_SOC_AK4458=m ++CONFIG_SND_SOC_AK4554=m ++CONFIG_SND_SOC_AK4613=m ++CONFIG_SND_SOC_AK4642=m ++CONFIG_SND_SOC_AK5386=m ++CONFIG_SND_SOC_AK5558=m ++CONFIG_SND_SOC_ALC5623=m ++CONFIG_SND_SOC_AW8738=m ++CONFIG_SND_SOC_BD28623=m ++CONFIG_SND_SOC_BT_SCO=m ++CONFIG_SND_SOC_CPCAP=m ++CONFIG_SND_SOC_CS35L32=m ++CONFIG_SND_SOC_CS35L33=m ++CONFIG_SND_SOC_CS35L34=m ++CONFIG_SND_SOC_CS35L35=m ++CONFIG_SND_SOC_CS35L36=m ++CONFIG_SND_SOC_CS35L41_SPI=m ++CONFIG_SND_SOC_CS35L41_I2C=m ++CONFIG_SND_SOC_CS35L45_SPI=m ++CONFIG_SND_SOC_CS35L45_I2C=m ++CONFIG_SND_SOC_CS42L42=m ++CONFIG_SND_SOC_CS42L51_I2C=m ++CONFIG_SND_SOC_CS42L52=m ++CONFIG_SND_SOC_CS42L56=m ++CONFIG_SND_SOC_CS42L73=m ++CONFIG_SND_SOC_CS42L83=m ++CONFIG_SND_SOC_CS4234=m ++CONFIG_SND_SOC_CS4265=m ++CONFIG_SND_SOC_CS4270=m ++CONFIG_SND_SOC_CS4271_I2C=m ++CONFIG_SND_SOC_CS4271_SPI=m ++CONFIG_SND_SOC_CS42XX8_I2C=m ++CONFIG_SND_SOC_CS43130=m ++CONFIG_SND_SOC_CS4341=m ++CONFIG_SND_SOC_CS4349=m ++CONFIG_SND_SOC_CS53L30=m ++CONFIG_SND_SOC_CX2072X=m ++CONFIG_SND_SOC_DA7213=m ++CONFIG_SND_SOC_DMIC=m ++CONFIG_SND_SOC_ES7134=m ++CONFIG_SND_SOC_ES7241=m ++CONFIG_SND_SOC_ES8316=m ++CONFIG_SND_SOC_ES8326=m ++CONFIG_SND_SOC_ES8328_I2C=m ++CONFIG_SND_SOC_ES8328_SPI=m ++CONFIG_SND_SOC_GTM601=m ++CONFIG_SND_SOC_HDA=m ++CONFIG_SND_SOC_ICS43432=m ++CONFIG_SND_SOC_INNO_RK3036=m ++CONFIG_SND_SOC_LOCHNAGAR_SC=m ++CONFIG_SND_SOC_MAX98088=m ++CONFIG_SND_SOC_MAX98357A=m ++CONFIG_SND_SOC_MAX98504=m ++CONFIG_SND_SOC_MAX9867=m ++CONFIG_SND_SOC_MAX98927=m ++CONFIG_SND_SOC_MAX98520=m ++CONFIG_SND_SOC_MAX98373_I2C=m ++CONFIG_SND_SOC_MAX98373_SDW=m ++CONFIG_SND_SOC_MAX98390=m ++CONFIG_SND_SOC_MAX98396=m ++CONFIG_SND_SOC_MAX9860=m ++CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m ++CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m ++CONFIG_SND_SOC_PCM1681=m ++CONFIG_SND_SOC_PCM1789_I2C=m ++CONFIG_SND_SOC_PCM179X_I2C=m ++CONFIG_SND_SOC_PCM179X_SPI=m ++CONFIG_SND_SOC_PCM186X_I2C=m ++CONFIG_SND_SOC_PCM186X_SPI=m ++CONFIG_SND_SOC_PCM3060_I2C=m ++CONFIG_SND_SOC_PCM3060_SPI=m ++CONFIG_SND_SOC_PCM3168A_I2C=m ++CONFIG_SND_SOC_PCM3168A_SPI=m ++CONFIG_SND_SOC_PCM5102A=m ++CONFIG_SND_SOC_PCM512x_I2C=m ++CONFIG_SND_SOC_PCM512x_SPI=m ++CONFIG_SND_SOC_RK3328=m ++CONFIG_SND_SOC_RT1308_SDW=m ++CONFIG_SND_SOC_RT1316_SDW=m ++CONFIG_SND_SOC_RT5616=m ++CONFIG_SND_SOC_RT5631=m ++CONFIG_SND_SOC_RT5640=m ++CONFIG_SND_SOC_RT5659=m ++CONFIG_SND_SOC_RT5682_SDW=m ++CONFIG_SND_SOC_RT700_SDW=m ++CONFIG_SND_SOC_RT711_SDW=m ++CONFIG_SND_SOC_RT711_SDCA_SDW=m ++CONFIG_SND_SOC_RT715_SDW=m ++CONFIG_SND_SOC_RT715_SDCA_SDW=m ++CONFIG_SND_SOC_RT9120=m ++CONFIG_SND_SOC_SDW_MOCKUP=m ++CONFIG_SND_SOC_SGTL5000=m ++CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m ++CONFIG_SND_SOC_SIMPLE_MUX=m ++CONFIG_SND_SOC_SPDIF=m ++CONFIG_SND_SOC_SRC4XXX_I2C=m ++CONFIG_SND_SOC_SSM2305=m ++CONFIG_SND_SOC_SSM2518=m ++CONFIG_SND_SOC_SSM2602_SPI=m ++CONFIG_SND_SOC_SSM2602_I2C=m ++CONFIG_SND_SOC_SSM4567=m ++CONFIG_SND_SOC_STA32X=m ++CONFIG_SND_SOC_STA350=m ++CONFIG_SND_SOC_STI_SAS=m ++CONFIG_SND_SOC_TAS2552=m ++CONFIG_SND_SOC_TAS2562=m ++CONFIG_SND_SOC_TAS2764=m ++CONFIG_SND_SOC_TAS2770=m ++CONFIG_SND_SOC_TAS2780=m ++CONFIG_SND_SOC_TAS5086=m ++CONFIG_SND_SOC_TAS571X=m ++CONFIG_SND_SOC_TAS5720=m ++CONFIG_SND_SOC_TAS5805M=m ++CONFIG_SND_SOC_TAS6424=m ++CONFIG_SND_SOC_TDA7419=m ++CONFIG_SND_SOC_TFA9879=m 
++CONFIG_SND_SOC_TFA989X=m ++CONFIG_SND_SOC_TLV320ADC3XXX=m ++CONFIG_SND_SOC_TLV320AIC23_I2C=m ++CONFIG_SND_SOC_TLV320AIC23_SPI=m ++CONFIG_SND_SOC_TLV320AIC31XX=m ++CONFIG_SND_SOC_TLV320AIC32X4_I2C=m ++CONFIG_SND_SOC_TLV320AIC32X4_SPI=m ++CONFIG_SND_SOC_TLV320AIC3X_I2C=m ++CONFIG_SND_SOC_TLV320AIC3X_SPI=m ++CONFIG_SND_SOC_TLV320ADCX140=m ++CONFIG_SND_SOC_TS3A227E=m ++CONFIG_SND_SOC_TSCS42XX=m ++CONFIG_SND_SOC_TSCS454=m ++CONFIG_SND_SOC_UDA1334=m ++CONFIG_SND_SOC_WCD9335=m ++CONFIG_SND_SOC_WCD934X=m ++CONFIG_SND_SOC_WCD938X_SDW=m ++CONFIG_SND_SOC_WM8510=m ++CONFIG_SND_SOC_WM8523=m ++CONFIG_SND_SOC_WM8524=m ++CONFIG_SND_SOC_WM8580=m ++CONFIG_SND_SOC_WM8711=m ++CONFIG_SND_SOC_WM8728=m ++CONFIG_SND_SOC_WM8731_I2C=m ++CONFIG_SND_SOC_WM8731_SPI=m ++CONFIG_SND_SOC_WM8737=m ++CONFIG_SND_SOC_WM8741=m ++CONFIG_SND_SOC_WM8750=m ++CONFIG_SND_SOC_WM8753=m ++CONFIG_SND_SOC_WM8770=m ++CONFIG_SND_SOC_WM8776=m ++CONFIG_SND_SOC_WM8782=m ++CONFIG_SND_SOC_WM8804_I2C=m ++CONFIG_SND_SOC_WM8804_SPI=m ++CONFIG_SND_SOC_WM8903=m ++CONFIG_SND_SOC_WM8904=m ++CONFIG_SND_SOC_WM8940=m ++CONFIG_SND_SOC_WM8960=m ++CONFIG_SND_SOC_WM8962=m ++CONFIG_SND_SOC_WM8974=m ++CONFIG_SND_SOC_WM8978=m ++CONFIG_SND_SOC_WM8985=m ++CONFIG_SND_SOC_WSA881X=m ++CONFIG_SND_SOC_WSA883X=m ++CONFIG_SND_SOC_ZL38060=m ++CONFIG_SND_SOC_MAX9759=m ++CONFIG_SND_SOC_MT6351=m ++CONFIG_SND_SOC_MT6358=m ++CONFIG_SND_SOC_MT6660=m ++CONFIG_SND_SOC_NAU8315=m ++CONFIG_SND_SOC_NAU8540=m ++CONFIG_SND_SOC_NAU8810=m ++CONFIG_SND_SOC_NAU8821=m ++CONFIG_SND_SOC_NAU8822=m ++CONFIG_SND_SOC_NAU8824=m ++CONFIG_SND_SOC_TPA6130A2=m ++CONFIG_SND_SOC_LPASS_WSA_MACRO=m ++CONFIG_SND_SOC_LPASS_VA_MACRO=m ++CONFIG_SND_SOC_LPASS_RX_MACRO=m ++CONFIG_SND_SOC_LPASS_TX_MACRO=m ++CONFIG_SND_SIMPLE_CARD=m ++CONFIG_SND_AUDIO_GRAPH_CARD=m ++CONFIG_SND_AUDIO_GRAPH_CARD2=m ++CONFIG_SND_AUDIO_GRAPH_CARD2_CUSTOM_SAMPLE=m ++CONFIG_SND_TEST_COMPONENT=m ++CONFIG_SND_VIRTIO=m ++CONFIG_HID=m ++CONFIG_HID_BATTERY_STRENGTH=y ++CONFIG_HIDRAW=y ++CONFIG_UHID=m ++CONFIG_HID_A4TECH=m ++CONFIG_HID_ACCUTOUCH=m ++CONFIG_HID_ACRUX=m ++CONFIG_HID_ACRUX_FF=y ++CONFIG_HID_APPLE=m ++CONFIG_HID_APPLEIR=m ++CONFIG_HID_ASUS=m ++CONFIG_HID_AUREAL=m ++CONFIG_HID_BELKIN=m ++CONFIG_HID_BETOP_FF=m ++CONFIG_HID_BIGBEN_FF=m ++CONFIG_HID_CHERRY=m ++CONFIG_HID_CHICONY=m ++CONFIG_HID_CORSAIR=m ++CONFIG_HID_COUGAR=m ++CONFIG_HID_MACALLY=m ++CONFIG_HID_PRODIKEYS=m ++CONFIG_HID_CMEDIA=m ++CONFIG_HID_CP2112=m ++CONFIG_HID_CREATIVE_SB0540=m ++CONFIG_HID_CYPRESS=m ++CONFIG_HID_DRAGONRISE=m ++CONFIG_DRAGONRISE_FF=y ++CONFIG_HID_EMS_FF=m ++CONFIG_HID_ELAN=m ++CONFIG_HID_ELECOM=m ++CONFIG_HID_ELO=m ++CONFIG_HID_EZKEY=m ++CONFIG_HID_FT260=m ++CONFIG_HID_GEMBIRD=m ++CONFIG_HID_GFRM=m ++CONFIG_HID_GLORIOUS=m ++CONFIG_HID_HOLTEK=m ++CONFIG_HOLTEK_FF=y ++CONFIG_HID_VIVALDI=m ++CONFIG_HID_GT683R=m ++CONFIG_HID_KEYTOUCH=m ++CONFIG_HID_KYE=m ++CONFIG_HID_UCLOGIC=m ++CONFIG_HID_WALTOP=m ++CONFIG_HID_VIEWSONIC=m ++CONFIG_HID_VRC2=m ++CONFIG_HID_XIAOMI=m ++CONFIG_HID_GYRATION=m ++CONFIG_HID_ICADE=m ++CONFIG_HID_ITE=m ++CONFIG_HID_JABRA=m ++CONFIG_HID_TWINHAN=m ++CONFIG_HID_KENSINGTON=m ++CONFIG_HID_LCPOWER=m ++CONFIG_HID_LENOVO=m ++CONFIG_HID_LETSKETCH=m ++CONFIG_HID_LOGITECH=m ++CONFIG_HID_LOGITECH_DJ=m ++CONFIG_LOGITECH_FF=y ++CONFIG_LOGIRUMBLEPAD2_FF=y ++CONFIG_LOGIG940_FF=y ++CONFIG_HID_MAGICMOUSE=m ++CONFIG_HID_MALTRON=m ++CONFIG_HID_MAYFLASH=m ++CONFIG_HID_MEGAWORLD_FF=m ++CONFIG_HID_REDRAGON=m ++CONFIG_HID_MICROSOFT=m ++CONFIG_HID_MONTEREY=m ++CONFIG_HID_MULTITOUCH=m ++CONFIG_HID_NINTENDO=m ++CONFIG_NINTENDO_FF=y ++CONFIG_HID_NTI=m 
++CONFIG_HID_NTRIG=m ++CONFIG_HID_ORTEK=m ++CONFIG_HID_PANTHERLORD=m ++CONFIG_PANTHERLORD_FF=y ++CONFIG_HID_PENMOUNT=m ++CONFIG_HID_PETALYNX=m ++CONFIG_HID_PICOLCD=m ++CONFIG_HID_PICOLCD_FB=y ++CONFIG_HID_PICOLCD_BACKLIGHT=y ++CONFIG_HID_PICOLCD_LCD=y ++CONFIG_HID_PICOLCD_LEDS=y ++CONFIG_HID_PICOLCD_CIR=y ++CONFIG_HID_PLANTRONICS=m ++CONFIG_HID_PLAYSTATION=m ++CONFIG_PLAYSTATION_FF=y ++CONFIG_HID_PXRC=m ++CONFIG_HID_RAZER=m ++CONFIG_HID_PRIMAX=m ++CONFIG_HID_RETRODE=m ++CONFIG_HID_ROCCAT=m ++CONFIG_HID_SAITEK=m ++CONFIG_HID_SAMSUNG=m ++CONFIG_HID_SEMITEK=m ++CONFIG_HID_SIGMAMICRO=m ++CONFIG_HID_SONY=m ++CONFIG_SONY_FF=y ++CONFIG_HID_SPEEDLINK=m ++CONFIG_HID_STEAM=m ++CONFIG_HID_STEELSERIES=m ++CONFIG_HID_SUNPLUS=m ++CONFIG_HID_RMI=m ++CONFIG_HID_GREENASIA=m ++CONFIG_GREENASIA_FF=y ++CONFIG_HID_SMARTJOYPLUS=m ++CONFIG_SMARTJOYPLUS_FF=y ++CONFIG_HID_TIVO=m ++CONFIG_HID_TOPSEED=m ++CONFIG_HID_TOPRE=m ++CONFIG_HID_THINGM=m ++CONFIG_HID_THRUSTMASTER=m ++CONFIG_THRUSTMASTER_FF=y ++CONFIG_HID_UDRAW_PS3=m ++CONFIG_HID_U2FZERO=m ++CONFIG_HID_WACOM=m ++CONFIG_HID_WIIMOTE=m ++CONFIG_HID_XINMO=m ++CONFIG_HID_ZEROPLUS=m ++CONFIG_ZEROPLUS_FF=y ++CONFIG_HID_ZYDACRON=m ++CONFIG_HID_SENSOR_HUB=m ++CONFIG_HID_SENSOR_CUSTOM_SENSOR=m ++CONFIG_HID_ALPS=m ++CONFIG_HID_MCP2221=m ++CONFIG_USB_HID=m ++CONFIG_HID_PID=y ++CONFIG_USB_HIDDEV=y ++CONFIG_USB_KBD=m ++CONFIG_USB_MOUSE=m ++CONFIG_I2C_HID_OF=m ++CONFIG_I2C_HID_OF_ELAN=m ++CONFIG_I2C_HID_OF_GOODIX=m ++CONFIG_USB_LED_TRIG=y ++CONFIG_USB_CONN_GPIO=m ++CONFIG_USB=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++CONFIG_USB_DYNAMIC_MINORS=y ++CONFIG_USB_OTG=y ++CONFIG_USB_OTG_FSM=m ++CONFIG_USB_LEDS_TRIGGER_USBPORT=m ++CONFIG_USB_MON=m ++CONFIG_USB_C67X00_HCD=m ++CONFIG_USB_XHCI_HCD=y ++CONFIG_USB_XHCI_DBGCAP=y ++CONFIG_USB_XHCI_PCI_RENESAS=m ++CONFIG_USB_XHCI_PLATFORM=y ++CONFIG_USB_EHCI_HCD=y ++CONFIG_USB_EHCI_FSL=m ++CONFIG_USB_EHCI_HCD_PLATFORM=y ++CONFIG_USB_OXU210HP_HCD=m ++CONFIG_USB_ISP116X_HCD=m ++CONFIG_USB_MAX3421_HCD=m ++CONFIG_USB_OHCI_HCD=y ++CONFIG_USB_OHCI_HCD_PLATFORM=y ++CONFIG_USB_UHCI_HCD=y ++CONFIG_USB_SL811_HCD=m ++CONFIG_USB_SL811_HCD_ISO=y ++CONFIG_USB_R8A66597_HCD=m ++CONFIG_USB_HCD_BCMA=m ++CONFIG_USB_HCD_SSB=m ++CONFIG_USB_PRINTER=m ++CONFIG_USB_TMC=m ++CONFIG_USB_STORAGE=m ++CONFIG_USB_STORAGE_REALTEK=m ++CONFIG_USB_STORAGE_DATAFAB=m ++CONFIG_USB_STORAGE_FREECOM=m ++CONFIG_USB_STORAGE_ISD200=m ++CONFIG_USB_STORAGE_USBAT=m ++CONFIG_USB_STORAGE_SDDR09=m ++CONFIG_USB_STORAGE_SDDR55=m ++CONFIG_USB_STORAGE_JUMPSHOT=m ++CONFIG_USB_STORAGE_ALAUDA=m ++CONFIG_USB_STORAGE_ONETOUCH=m ++CONFIG_USB_STORAGE_KARMA=m ++CONFIG_USB_STORAGE_CYPRESS_ATACB=m ++CONFIG_USB_STORAGE_ENE_UB6250=m ++CONFIG_USB_UAS=m ++CONFIG_USB_MDC800=m ++CONFIG_USB_MICROTEK=m ++CONFIG_USBIP_CORE=m ++CONFIG_USBIP_VHCI_HCD=m ++CONFIG_USBIP_HOST=m ++CONFIG_USBIP_VUDC=m ++CONFIG_USB_CDNS_SUPPORT=m ++CONFIG_USB_CDNS3=m ++CONFIG_USB_CDNS3_GADGET=y ++CONFIG_USB_CDNS3_HOST=y ++CONFIG_USB_MUSB_HDRC=m ++CONFIG_USB_MUSB_POLARFIRE_SOC=m ++CONFIG_MUSB_PIO_ONLY=y ++CONFIG_USB_DWC3=m ++CONFIG_USB_DWC3_ULPI=y ++CONFIG_USB_DWC2=y ++CONFIG_USB_DWC2_PCI=m ++CONFIG_USB_CHIPIDEA=m ++CONFIG_USB_CHIPIDEA_UDC=y ++CONFIG_USB_CHIPIDEA_HOST=y ++CONFIG_USB_ISP1760=m ++CONFIG_USB_SERIAL=m ++CONFIG_USB_SERIAL_GENERIC=y ++CONFIG_USB_SERIAL_SIMPLE=m ++CONFIG_USB_SERIAL_AIRCABLE=m ++CONFIG_USB_SERIAL_ARK3116=m ++CONFIG_USB_SERIAL_BELKIN=m ++CONFIG_USB_SERIAL_CH341=m ++CONFIG_USB_SERIAL_WHITEHEAT=m ++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m ++CONFIG_USB_SERIAL_CP210X=m ++CONFIG_USB_SERIAL_CYPRESS_M8=m 
++CONFIG_USB_SERIAL_EMPEG=m ++CONFIG_USB_SERIAL_FTDI_SIO=m ++CONFIG_USB_SERIAL_VISOR=m ++CONFIG_USB_SERIAL_IPAQ=m ++CONFIG_USB_SERIAL_IR=m ++CONFIG_USB_SERIAL_EDGEPORT=m ++CONFIG_USB_SERIAL_EDGEPORT_TI=m ++CONFIG_USB_SERIAL_F81232=m ++CONFIG_USB_SERIAL_F8153X=m ++CONFIG_USB_SERIAL_GARMIN=m ++CONFIG_USB_SERIAL_IPW=m ++CONFIG_USB_SERIAL_IUU=m ++CONFIG_USB_SERIAL_KEYSPAN_PDA=m ++CONFIG_USB_SERIAL_KEYSPAN=m ++CONFIG_USB_SERIAL_KLSI=m ++CONFIG_USB_SERIAL_KOBIL_SCT=m ++CONFIG_USB_SERIAL_MCT_U232=m ++CONFIG_USB_SERIAL_METRO=m ++CONFIG_USB_SERIAL_MOS7720=m ++CONFIG_USB_SERIAL_MOS7715_PARPORT=y ++CONFIG_USB_SERIAL_MOS7840=m ++CONFIG_USB_SERIAL_MXUPORT=m ++CONFIG_USB_SERIAL_NAVMAN=m ++CONFIG_USB_SERIAL_PL2303=m ++CONFIG_USB_SERIAL_OTI6858=m ++CONFIG_USB_SERIAL_QCAUX=m ++CONFIG_USB_SERIAL_QUALCOMM=m ++CONFIG_USB_SERIAL_SPCP8X5=m ++CONFIG_USB_SERIAL_SAFE=m ++CONFIG_USB_SERIAL_SIERRAWIRELESS=m ++CONFIG_USB_SERIAL_SYMBOL=m ++CONFIG_USB_SERIAL_TI=m ++CONFIG_USB_SERIAL_CYBERJACK=m ++CONFIG_USB_SERIAL_OPTION=m ++CONFIG_USB_SERIAL_OMNINET=m ++CONFIG_USB_SERIAL_OPTICON=m ++CONFIG_USB_SERIAL_XSENS_MT=m ++CONFIG_USB_SERIAL_WISHBONE=m ++CONFIG_USB_SERIAL_SSU100=m ++CONFIG_USB_SERIAL_QT2=m ++CONFIG_USB_SERIAL_UPD78F0730=m ++CONFIG_USB_SERIAL_XR=m ++CONFIG_USB_SERIAL_DEBUG=m ++CONFIG_USB_USS720=m ++CONFIG_USB_EMI62=m ++CONFIG_USB_EMI26=m ++CONFIG_USB_ADUTUX=m ++CONFIG_USB_SEVSEG=m ++CONFIG_USB_LEGOTOWER=m ++CONFIG_USB_LCD=m ++CONFIG_USB_CYPRESS_CY7C63=m ++CONFIG_USB_CYTHERM=m ++CONFIG_USB_IDMOUSE=m ++CONFIG_USB_APPLEDISPLAY=m ++CONFIG_APPLE_MFI_FASTCHARGE=m ++CONFIG_USB_SISUSBVGA=m ++CONFIG_USB_LD=m ++CONFIG_USB_TRANCEVIBRATOR=m ++CONFIG_USB_IOWARRIOR=m ++CONFIG_USB_TEST=m ++CONFIG_USB_EHSET_TEST_FIXTURE=m ++CONFIG_USB_ISIGHTFW=m ++CONFIG_USB_YUREX=m ++CONFIG_USB_HUB_USB251XB=m ++CONFIG_USB_HSIC_USB3503=m ++CONFIG_USB_HSIC_USB4604=m ++CONFIG_USB_LINK_LAYER_TEST=m ++CONFIG_USB_CHAOSKEY=m ++CONFIG_USB_ONBOARD_HUB=m ++CONFIG_USB_ATM=m ++CONFIG_USB_SPEEDTOUCH=m ++CONFIG_USB_CXACRU=m ++CONFIG_USB_UEAGLEATM=m ++CONFIG_USB_XUSBATM=m ++CONFIG_USB_GPIO_VBUS=m ++CONFIG_TAHVO_USB=m ++CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y ++CONFIG_USB_ISP1301=m ++CONFIG_USB_GADGET=m ++CONFIG_U_SERIAL_CONSOLE=y ++CONFIG_USB_GR_UDC=m ++CONFIG_USB_R8A66597=m ++CONFIG_USB_PXA27X=m ++CONFIG_USB_MV_UDC=m ++CONFIG_USB_MV_U3D=m ++CONFIG_USB_SNP_UDC_PLAT=m ++CONFIG_USB_BDC_UDC=m ++CONFIG_USB_AMD5536UDC=m ++CONFIG_USB_NET2272=m ++CONFIG_USB_NET2272_DMA=y ++CONFIG_USB_NET2280=m ++CONFIG_USB_GOKU=m ++CONFIG_USB_EG20T=m ++CONFIG_USB_GADGET_XILINX=m ++CONFIG_USB_MAX3420_UDC=m ++CONFIG_USB_CONFIGFS=m ++CONFIG_USB_CONFIGFS_SERIAL=y ++CONFIG_USB_CONFIGFS_ACM=y ++CONFIG_USB_CONFIGFS_OBEX=y ++CONFIG_USB_CONFIGFS_NCM=y ++CONFIG_USB_CONFIGFS_ECM=y ++CONFIG_USB_CONFIGFS_ECM_SUBSET=y ++CONFIG_USB_CONFIGFS_RNDIS=y ++CONFIG_USB_CONFIGFS_EEM=y ++CONFIG_USB_CONFIGFS_PHONET=y ++CONFIG_USB_CONFIGFS_MASS_STORAGE=y ++CONFIG_USB_CONFIGFS_F_LB_SS=y ++CONFIG_USB_CONFIGFS_F_FS=y ++CONFIG_USB_CONFIGFS_F_UAC1=y ++CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y ++CONFIG_USB_CONFIGFS_F_UAC2=y ++CONFIG_USB_CONFIGFS_F_MIDI=y ++CONFIG_USB_CONFIGFS_F_HID=y ++CONFIG_USB_CONFIGFS_F_UVC=y ++CONFIG_USB_CONFIGFS_F_PRINTER=y ++CONFIG_USB_CONFIGFS_F_TCM=y ++CONFIG_USB_ZERO=m ++CONFIG_USB_AUDIO=m ++CONFIG_GADGET_UAC1=y ++CONFIG_USB_ETH=m ++CONFIG_USB_ETH_EEM=y ++CONFIG_USB_G_NCM=m ++CONFIG_USB_GADGETFS=m ++CONFIG_USB_FUNCTIONFS=m ++CONFIG_USB_FUNCTIONFS_ETH=y ++CONFIG_USB_FUNCTIONFS_RNDIS=y ++CONFIG_USB_FUNCTIONFS_GENERIC=y ++CONFIG_USB_MASS_STORAGE=m ++CONFIG_USB_GADGET_TARGET=m ++CONFIG_USB_G_SERIAL=m 
++CONFIG_USB_MIDI_GADGET=m ++CONFIG_USB_G_PRINTER=m ++CONFIG_USB_CDC_COMPOSITE=m ++CONFIG_USB_G_NOKIA=m ++CONFIG_USB_G_ACM_MS=m ++CONFIG_USB_G_HID=m ++CONFIG_USB_G_DBGP=m ++CONFIG_USB_G_WEBCAM=m ++CONFIG_USB_RAW_GADGET=m ++CONFIG_TYPEC=m ++CONFIG_TYPEC_TCPM=m ++CONFIG_TYPEC_TCPCI=m ++CONFIG_TYPEC_RT1711H=m ++CONFIG_TYPEC_MT6360=m ++CONFIG_TYPEC_TCPCI_MT6370=m ++CONFIG_TYPEC_TCPCI_MAXIM=m ++CONFIG_TYPEC_FUSB302=m ++CONFIG_TYPEC_UCSI=m ++CONFIG_UCSI_CCG=m ++CONFIG_UCSI_STM32G0=m ++CONFIG_TYPEC_TPS6598X=m ++CONFIG_TYPEC_ANX7411=m ++CONFIG_TYPEC_RT1719=m ++CONFIG_TYPEC_HD3SS3220=m ++CONFIG_TYPEC_STUSB160X=m ++CONFIG_TYPEC_WUSB3801=m ++CONFIG_TYPEC_MUX_FSA4480=m ++CONFIG_TYPEC_MUX_PI3USB30532=m ++CONFIG_TYPEC_DP_ALTMODE=m ++CONFIG_TYPEC_NVIDIA_ALTMODE=m ++CONFIG_MMC=y ++CONFIG_PWRSEQ_EMMC=m ++CONFIG_PWRSEQ_SD8787=m ++CONFIG_PWRSEQ_SIMPLE=m ++CONFIG_SDIO_UART=m ++CONFIG_MMC_CRYPTO=y ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_PCI=m ++CONFIG_MMC_SDHCI_PLTFM=y ++CONFIG_MMC_SDHCI_OF_ARASAN=m ++CONFIG_MMC_SDHCI_OF_AT91=m ++CONFIG_MMC_SDHCI_OF_DWCMSHC=m ++CONFIG_MMC_SDHCI_CADENCE=y ++CONFIG_MMC_SDHCI_F_SDH30=m ++CONFIG_MMC_SDHCI_MILBEAUT=m ++CONFIG_MMC_ALCOR=m ++CONFIG_MMC_TIFM_SD=m ++CONFIG_MMC_SPI=y ++CONFIG_MMC_CB710=m ++CONFIG_MMC_VIA_SDMMC=m ++CONFIG_MMC_DW=m ++CONFIG_MMC_DW_BLUEFIELD=m ++CONFIG_MMC_DW_EXYNOS=m ++CONFIG_MMC_DW_HI3798CV200=m ++CONFIG_MMC_DW_K3=m ++CONFIG_MMC_DW_PCI=m ++CONFIG_MMC_VUB300=m ++CONFIG_MMC_USHC=m ++CONFIG_MMC_USDHI6ROL0=m ++CONFIG_MMC_REALTEK_PCI=m ++CONFIG_MMC_REALTEK_USB=m ++CONFIG_MMC_HSQ=m ++CONFIG_MMC_TOSHIBA_PCI=m ++CONFIG_MMC_MTK=m ++CONFIG_MMC_SDHCI_XENON=m ++CONFIG_MMC_LITEX=m ++CONFIG_SCSI_UFSHCD=m ++CONFIG_SCSI_UFS_BSG=y ++CONFIG_SCSI_UFS_CRYPTO=y ++CONFIG_SCSI_UFSHCD_PCI=m ++CONFIG_SCSI_UFS_DWC_TC_PCI=m ++CONFIG_SCSI_UFSHCD_PLATFORM=m ++CONFIG_SCSI_UFS_CDNS_PLATFORM=m ++CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m ++CONFIG_MEMSTICK=m ++CONFIG_MSPRO_BLOCK=m ++CONFIG_MS_BLOCK=m ++CONFIG_MEMSTICK_TIFM_MS=m ++CONFIG_MEMSTICK_JMICRON_38X=m ++CONFIG_MEMSTICK_R592=m ++CONFIG_MEMSTICK_REALTEK_PCI=m ++CONFIG_MEMSTICK_REALTEK_USB=m ++CONFIG_LEDS_CLASS=y ++CONFIG_LEDS_CLASS_FLASH=m ++CONFIG_LEDS_CLASS_MULTICOLOR=m ++CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y ++CONFIG_LEDS_88PM860X=m ++CONFIG_LEDS_AN30259A=m ++CONFIG_LEDS_AW2013=m ++CONFIG_LEDS_BCM6328=m ++CONFIG_LEDS_BCM6358=m ++CONFIG_LEDS_CPCAP=m ++CONFIG_LEDS_CR0014114=m ++CONFIG_LEDS_EL15203000=m ++CONFIG_LEDS_LM3530=m ++CONFIG_LEDS_LM3532=m ++CONFIG_LEDS_LM3533=m ++CONFIG_LEDS_LM3642=m ++CONFIG_LEDS_LM3692X=m ++CONFIG_LEDS_MT6323=m ++CONFIG_LEDS_PCA9532=m ++CONFIG_LEDS_PCA9532_GPIO=y ++CONFIG_LEDS_GPIO=m ++CONFIG_LEDS_LP3944=m ++CONFIG_LEDS_LP3952=m ++CONFIG_LEDS_LP50XX=m ++CONFIG_LEDS_LP55XX_COMMON=m ++CONFIG_LEDS_LP5521=m ++CONFIG_LEDS_LP5523=m ++CONFIG_LEDS_LP5562=m ++CONFIG_LEDS_LP8501=m ++CONFIG_LEDS_LP8788=m ++CONFIG_LEDS_LP8860=m ++CONFIG_LEDS_PCA955X=m ++CONFIG_LEDS_PCA955X_GPIO=y ++CONFIG_LEDS_PCA963X=m ++CONFIG_LEDS_WM831X_STATUS=m ++CONFIG_LEDS_WM8350=m ++CONFIG_LEDS_DA903X=m ++CONFIG_LEDS_DA9052=m ++CONFIG_LEDS_DAC124S085=m ++CONFIG_LEDS_REGULATOR=m ++CONFIG_LEDS_BD2802=m ++CONFIG_LEDS_LT3593=m ++CONFIG_LEDS_ADP5520=m ++CONFIG_LEDS_MC13783=m ++CONFIG_LEDS_TCA6507=m ++CONFIG_LEDS_TLC591XX=m ++CONFIG_LEDS_MAX77650=m ++CONFIG_LEDS_MAX8997=m ++CONFIG_LEDS_LM355x=m ++CONFIG_LEDS_MENF21BMC=m ++CONFIG_LEDS_IS31FL319X=m ++CONFIG_LEDS_IS31FL32XX=m ++CONFIG_LEDS_BLINKM=m ++CONFIG_LEDS_SYSCON=y ++CONFIG_LEDS_MLXREG=m ++CONFIG_LEDS_USER=m ++CONFIG_LEDS_SPI_BYTE=m ++CONFIG_LEDS_LM3697=m ++CONFIG_LEDS_LM36274=m ++CONFIG_LEDS_AAT1290=m 
++CONFIG_LEDS_AS3645A=m ++CONFIG_LEDS_KTD2692=m ++CONFIG_LEDS_LM3601X=m ++CONFIG_LEDS_MAX77693=m ++CONFIG_LEDS_MT6360=m ++CONFIG_LEDS_RT4505=m ++CONFIG_LEDS_RT8515=m ++CONFIG_LEDS_SGM3140=m ++CONFIG_LEDS_TRIGGER_TIMER=m ++CONFIG_LEDS_TRIGGER_ONESHOT=m ++CONFIG_LEDS_TRIGGER_DISK=y ++CONFIG_LEDS_TRIGGER_MTD=y ++CONFIG_LEDS_TRIGGER_HEARTBEAT=m ++CONFIG_LEDS_TRIGGER_BACKLIGHT=m ++CONFIG_LEDS_TRIGGER_CPU=y ++CONFIG_LEDS_TRIGGER_ACTIVITY=m ++CONFIG_LEDS_TRIGGER_DEFAULT_ON=m ++CONFIG_LEDS_TRIGGER_TRANSIENT=m ++CONFIG_LEDS_TRIGGER_CAMERA=m ++CONFIG_LEDS_TRIGGER_PANIC=y ++CONFIG_LEDS_TRIGGER_NETDEV=m ++CONFIG_LEDS_TRIGGER_PATTERN=m ++CONFIG_LEDS_TRIGGER_TTY=m ++CONFIG_ACCESSIBILITY=y ++CONFIG_SPEAKUP=m ++CONFIG_SPEAKUP_SYNTH_ACNTSA=m ++CONFIG_SPEAKUP_SYNTH_APOLLO=m ++CONFIG_SPEAKUP_SYNTH_AUDPTR=m ++CONFIG_SPEAKUP_SYNTH_BNS=m ++CONFIG_SPEAKUP_SYNTH_DECTLK=m ++CONFIG_SPEAKUP_SYNTH_DECEXT=m ++CONFIG_SPEAKUP_SYNTH_LTLK=m ++CONFIG_SPEAKUP_SYNTH_SOFT=m ++CONFIG_SPEAKUP_SYNTH_SPKOUT=m ++CONFIG_SPEAKUP_SYNTH_TXPRT=m ++CONFIG_SPEAKUP_SYNTH_DUMMY=m ++CONFIG_INFINIBAND=m ++CONFIG_INFINIBAND_USER_MAD=m ++CONFIG_INFINIBAND_USER_ACCESS=m ++CONFIG_INFINIBAND_BNXT_RE=m ++CONFIG_INFINIBAND_CXGB4=m ++CONFIG_INFINIBAND_EFA=m ++CONFIG_INFINIBAND_ERDMA=m ++CONFIG_INFINIBAND_IRDMA=m ++CONFIG_MLX4_INFINIBAND=m ++CONFIG_MLX5_INFINIBAND=m ++CONFIG_INFINIBAND_MTHCA=m ++# CONFIG_INFINIBAND_MTHCA_DEBUG is not set ++CONFIG_INFINIBAND_OCRDMA=m ++CONFIG_INFINIBAND_QEDR=m ++CONFIG_INFINIBAND_VMWARE_PVRDMA=m ++CONFIG_INFINIBAND_IPOIB=m ++CONFIG_INFINIBAND_IPOIB_CM=y ++# CONFIG_INFINIBAND_IPOIB_DEBUG is not set ++CONFIG_INFINIBAND_SRP=m ++CONFIG_INFINIBAND_SRPT=m ++CONFIG_INFINIBAND_ISER=m ++CONFIG_INFINIBAND_ISERT=m ++CONFIG_INFINIBAND_RTRS_CLIENT=m ++CONFIG_INFINIBAND_RTRS_SERVER=m ++CONFIG_EDAC=y ++# CONFIG_EDAC_LEGACY_SYSFS is not set ++CONFIG_EDAC_SIFIVE=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_88PM860X=m ++CONFIG_RTC_DRV_88PM80X=m ++CONFIG_RTC_DRV_ABB5ZES3=m ++CONFIG_RTC_DRV_ABEOZ9=m ++CONFIG_RTC_DRV_ABX80X=m ++CONFIG_RTC_DRV_AS3722=m ++CONFIG_RTC_DRV_DS1307=m ++CONFIG_RTC_DRV_DS1307_CENTURY=y ++CONFIG_RTC_DRV_DS1374=m ++CONFIG_RTC_DRV_DS1374_WDT=y ++CONFIG_RTC_DRV_DS1672=m ++CONFIG_RTC_DRV_HYM8563=m ++CONFIG_RTC_DRV_LP8788=m ++CONFIG_RTC_DRV_MAX6900=m ++CONFIG_RTC_DRV_MAX8907=m ++CONFIG_RTC_DRV_MAX8925=m ++CONFIG_RTC_DRV_MAX8998=m ++CONFIG_RTC_DRV_MAX8997=m ++CONFIG_RTC_DRV_MAX77686=m ++CONFIG_RTC_DRV_NCT3018Y=m ++CONFIG_RTC_DRV_RS5C372=m ++CONFIG_RTC_DRV_ISL1208=m ++CONFIG_RTC_DRV_ISL12022=m ++CONFIG_RTC_DRV_ISL12026=m ++CONFIG_RTC_DRV_X1205=m ++CONFIG_RTC_DRV_PCF8523=m ++CONFIG_RTC_DRV_PCF85063=m ++CONFIG_RTC_DRV_PCF85363=m ++CONFIG_RTC_DRV_PCF8563=m ++CONFIG_RTC_DRV_PCF8583=m ++CONFIG_RTC_DRV_M41T80=m ++CONFIG_RTC_DRV_M41T80_WDT=y ++CONFIG_RTC_DRV_BD70528=m ++CONFIG_RTC_DRV_BQ32K=m ++CONFIG_RTC_DRV_TWL4030=m ++CONFIG_RTC_DRV_PALMAS=m ++CONFIG_RTC_DRV_TPS6586X=m ++CONFIG_RTC_DRV_TPS65910=m ++CONFIG_RTC_DRV_RC5T583=m ++CONFIG_RTC_DRV_RC5T619=m ++CONFIG_RTC_DRV_S35390A=m ++CONFIG_RTC_DRV_FM3130=m ++CONFIG_RTC_DRV_RX8010=m ++CONFIG_RTC_DRV_RX8581=m ++CONFIG_RTC_DRV_RX8025=m ++CONFIG_RTC_DRV_EM3027=m ++CONFIG_RTC_DRV_RV3028=m ++CONFIG_RTC_DRV_RV3032=m ++CONFIG_RTC_DRV_RV8803=m ++CONFIG_RTC_DRV_S5M=m ++CONFIG_RTC_DRV_SD3078=m ++CONFIG_RTC_DRV_M41T93=m ++CONFIG_RTC_DRV_M41T94=m ++CONFIG_RTC_DRV_DS1302=m ++CONFIG_RTC_DRV_DS1305=m ++CONFIG_RTC_DRV_DS1343=m ++CONFIG_RTC_DRV_DS1347=m ++CONFIG_RTC_DRV_DS1390=m ++CONFIG_RTC_DRV_MAX6916=m ++CONFIG_RTC_DRV_R9701=m ++CONFIG_RTC_DRV_RX4581=m ++CONFIG_RTC_DRV_RS5C348=m 
++CONFIG_RTC_DRV_MAX6902=m ++CONFIG_RTC_DRV_PCF2123=m ++CONFIG_RTC_DRV_MCP795=m ++CONFIG_RTC_DRV_DS3232=m ++CONFIG_RTC_DRV_PCF2127=m ++CONFIG_RTC_DRV_RV3029C2=m ++CONFIG_RTC_DRV_RX6110=m ++CONFIG_RTC_DRV_DS1286=m ++CONFIG_RTC_DRV_DS1511=m ++CONFIG_RTC_DRV_DS1553=m ++CONFIG_RTC_DRV_DS1685_FAMILY=m ++CONFIG_RTC_DRV_DS1742=m ++CONFIG_RTC_DRV_DS2404=m ++CONFIG_RTC_DRV_DA9052=m ++CONFIG_RTC_DRV_DA9055=m ++CONFIG_RTC_DRV_DA9063=m ++CONFIG_RTC_DRV_EFI=m ++CONFIG_RTC_DRV_STK17TA8=m ++CONFIG_RTC_DRV_M48T86=m ++CONFIG_RTC_DRV_M48T35=m ++CONFIG_RTC_DRV_M48T59=m ++CONFIG_RTC_DRV_MSM6242=m ++CONFIG_RTC_DRV_RP5C01=m ++CONFIG_RTC_DRV_WM831X=m ++CONFIG_RTC_DRV_WM8350=m ++CONFIG_RTC_DRV_PCF50633=m ++CONFIG_RTC_DRV_ZYNQMP=m ++CONFIG_RTC_DRV_NTXEC=m ++CONFIG_RTC_DRV_CADENCE=m ++CONFIG_RTC_DRV_FTRTC010=m ++CONFIG_RTC_DRV_PCAP=m ++CONFIG_RTC_DRV_MC13XXX=m ++CONFIG_RTC_DRV_MT6397=m ++CONFIG_RTC_DRV_R7301=m ++CONFIG_RTC_DRV_CPCAP=m ++CONFIG_RTC_DRV_HID_SENSOR_TIME=m ++CONFIG_RTC_DRV_POLARFIRE_SOC=m ++CONFIG_DMADEVICES=y ++CONFIG_ALTERA_MSGDMA=m ++CONFIG_DW_AXI_DMAC=m ++CONFIG_FSL_EDMA=m ++CONFIG_INTEL_IDMA64=m ++CONFIG_PLX_DMA=m ++CONFIG_XILINX_ZYNQMP_DPDMA=m ++CONFIG_QCOM_HIDMA_MGMT=m ++CONFIG_QCOM_HIDMA=m ++CONFIG_DW_DMAC=m ++CONFIG_DW_DMAC_PCI=m ++CONFIG_SF_PDMA=m ++CONFIG_ASYNC_TX_DMA=y ++CONFIG_SW_SYNC=y ++CONFIG_UDMABUF=y ++CONFIG_DMABUF_MOVE_NOTIFY=y ++CONFIG_DMABUF_HEAPS=y ++CONFIG_DMABUF_HEAPS_SYSTEM=y ++CONFIG_DMABUF_HEAPS_CMA=y ++CONFIG_UIO_CIF=m ++CONFIG_UIO_PDRV_GENIRQ=m ++CONFIG_UIO_DMEM_GENIRQ=m ++CONFIG_UIO_AEC=m ++CONFIG_UIO_SERCOS3=m ++CONFIG_UIO_PCI_GENERIC=m ++CONFIG_UIO_NETX=m ++CONFIG_UIO_PRUSS=m ++CONFIG_UIO_MF624=m ++CONFIG_UIO_DFL=m ++CONFIG_VFIO=m ++CONFIG_VFIO_NOIOMMU=y ++CONFIG_VFIO_PCI=m ++CONFIG_MLX5_VFIO_PCI=m ++CONFIG_VIRT_DRIVERS=y ++CONFIG_VIRTIO_PCI=y ++CONFIG_VIRTIO_VDPA=m ++CONFIG_VIRTIO_PMEM=m ++CONFIG_VIRTIO_BALLOON=y ++CONFIG_VIRTIO_INPUT=m ++CONFIG_VIRTIO_MMIO=y ++CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y ++CONFIG_VDPA=m ++CONFIG_VDPA_SIM=m ++CONFIG_VDPA_SIM_NET=m ++CONFIG_VDPA_SIM_BLOCK=m ++CONFIG_VDPA_USER=m ++CONFIG_IFCVF=m ++CONFIG_MLX5_VDPA_NET=m ++CONFIG_VP_VDPA=m ++CONFIG_VHOST_NET=m ++CONFIG_VHOST_SCSI=m ++CONFIG_VHOST_VSOCK=m ++CONFIG_VHOST_VDPA=m ++CONFIG_GREYBUS=m ++CONFIG_GREYBUS_ES2=m ++CONFIG_COMEDI=m ++CONFIG_COMEDI_MISC_DRIVERS=y ++CONFIG_COMEDI_BOND=m ++CONFIG_COMEDI_TEST=m ++CONFIG_COMEDI_PARPORT=m ++CONFIG_COMEDI_ISA_DRIVERS=y ++CONFIG_COMEDI_PCL711=m ++CONFIG_COMEDI_PCL724=m ++CONFIG_COMEDI_PCL726=m ++CONFIG_COMEDI_PCL730=m ++CONFIG_COMEDI_PCL812=m ++CONFIG_COMEDI_PCL816=m ++CONFIG_COMEDI_PCL818=m ++CONFIG_COMEDI_PCM3724=m ++CONFIG_COMEDI_AMPLC_DIO200_ISA=m ++CONFIG_COMEDI_AMPLC_PC236_ISA=m ++CONFIG_COMEDI_AMPLC_PC263_ISA=m ++CONFIG_COMEDI_RTI800=m ++CONFIG_COMEDI_RTI802=m ++CONFIG_COMEDI_DAC02=m ++CONFIG_COMEDI_DAS16M1=m ++CONFIG_COMEDI_DAS08_ISA=m ++CONFIG_COMEDI_DAS16=m ++CONFIG_COMEDI_DAS800=m ++CONFIG_COMEDI_DAS1800=m ++CONFIG_COMEDI_DAS6402=m ++CONFIG_COMEDI_DT2801=m ++CONFIG_COMEDI_DT2811=m ++CONFIG_COMEDI_DT2814=m ++CONFIG_COMEDI_DT2815=m ++CONFIG_COMEDI_DT2817=m ++CONFIG_COMEDI_DT282X=m ++CONFIG_COMEDI_DMM32AT=m ++CONFIG_COMEDI_FL512=m ++CONFIG_COMEDI_AIO_AIO12_8=m ++CONFIG_COMEDI_AIO_IIRO_16=m ++CONFIG_COMEDI_II_PCI20KC=m ++CONFIG_COMEDI_C6XDIGIO=m ++CONFIG_COMEDI_MPC624=m ++CONFIG_COMEDI_ADQ12B=m ++CONFIG_COMEDI_NI_AT_A2150=m ++CONFIG_COMEDI_NI_AT_AO=m ++CONFIG_COMEDI_NI_ATMIO=m ++CONFIG_COMEDI_NI_ATMIO16D=m ++CONFIG_COMEDI_NI_LABPC_ISA=m ++CONFIG_COMEDI_PCMAD=m ++CONFIG_COMEDI_PCMDA12=m ++CONFIG_COMEDI_PCMMIO=m ++CONFIG_COMEDI_PCMUIO=m 
++CONFIG_COMEDI_MULTIQ3=m ++CONFIG_COMEDI_S526=m ++CONFIG_COMEDI_PCI_DRIVERS=m ++CONFIG_COMEDI_8255_PCI=m ++CONFIG_COMEDI_ADDI_APCI_1032=m ++CONFIG_COMEDI_ADDI_APCI_1500=m ++CONFIG_COMEDI_ADDI_APCI_1516=m ++CONFIG_COMEDI_ADDI_APCI_1564=m ++CONFIG_COMEDI_ADDI_APCI_16XX=m ++CONFIG_COMEDI_ADDI_APCI_2032=m ++CONFIG_COMEDI_ADDI_APCI_2200=m ++CONFIG_COMEDI_ADDI_APCI_3120=m ++CONFIG_COMEDI_ADDI_APCI_3501=m ++CONFIG_COMEDI_ADDI_APCI_3XXX=m ++CONFIG_COMEDI_ADL_PCI6208=m ++CONFIG_COMEDI_ADL_PCI7X3X=m ++CONFIG_COMEDI_ADL_PCI8164=m ++CONFIG_COMEDI_ADL_PCI9111=m ++CONFIG_COMEDI_ADL_PCI9118=m ++CONFIG_COMEDI_ADV_PCI1710=m ++CONFIG_COMEDI_ADV_PCI1720=m ++CONFIG_COMEDI_ADV_PCI1723=m ++CONFIG_COMEDI_ADV_PCI1724=m ++CONFIG_COMEDI_ADV_PCI1760=m ++CONFIG_COMEDI_ADV_PCI_DIO=m ++CONFIG_COMEDI_AMPLC_DIO200_PCI=m ++CONFIG_COMEDI_AMPLC_PC236_PCI=m ++CONFIG_COMEDI_AMPLC_PC263_PCI=m ++CONFIG_COMEDI_AMPLC_PCI224=m ++CONFIG_COMEDI_AMPLC_PCI230=m ++CONFIG_COMEDI_CONTEC_PCI_DIO=m ++CONFIG_COMEDI_DAS08_PCI=m ++CONFIG_COMEDI_DT3000=m ++CONFIG_COMEDI_DYNA_PCI10XX=m ++CONFIG_COMEDI_GSC_HPDI=m ++CONFIG_COMEDI_MF6X4=m ++CONFIG_COMEDI_ICP_MULTI=m ++CONFIG_COMEDI_DAQBOARD2000=m ++CONFIG_COMEDI_JR3_PCI=m ++CONFIG_COMEDI_KE_COUNTER=m ++CONFIG_COMEDI_CB_PCIDAS64=m ++CONFIG_COMEDI_CB_PCIDAS=m ++CONFIG_COMEDI_CB_PCIDDA=m ++CONFIG_COMEDI_CB_PCIMDAS=m ++CONFIG_COMEDI_CB_PCIMDDA=m ++CONFIG_COMEDI_ME4000=m ++CONFIG_COMEDI_ME_DAQ=m ++CONFIG_COMEDI_NI_6527=m ++CONFIG_COMEDI_NI_65XX=m ++CONFIG_COMEDI_NI_660X=m ++CONFIG_COMEDI_NI_670X=m ++CONFIG_COMEDI_NI_LABPC_PCI=m ++CONFIG_COMEDI_NI_PCIDIO=m ++CONFIG_COMEDI_NI_PCIMIO=m ++CONFIG_COMEDI_RTD520=m ++CONFIG_COMEDI_S626=m ++CONFIG_COMEDI_USB_DRIVERS=m ++CONFIG_COMEDI_DT9812=m ++CONFIG_COMEDI_NI_USB6501=m ++CONFIG_COMEDI_USBDUX=m ++CONFIG_COMEDI_USBDUXFAST=m ++CONFIG_COMEDI_USBDUXSIGMA=m ++CONFIG_COMEDI_VMK80XX=m ++CONFIG_COMEDI_8255_SA=m ++CONFIG_COMEDI_TESTS=m ++CONFIG_COMEDI_TESTS_EXAMPLE=m ++CONFIG_COMEDI_TESTS_NI_ROUTES=m ++CONFIG_STAGING=y ++CONFIG_PRISM2_USB=m ++CONFIG_RTLLIB=m ++CONFIG_RTL8192E=m ++CONFIG_RTL8723BS=m ++CONFIG_R8712U=m ++CONFIG_RTS5208=m ++CONFIG_VT6655=m ++CONFIG_VT6656=m ++CONFIG_ADIS16203=m ++CONFIG_ADIS16240=m ++CONFIG_AD7816=m ++CONFIG_ADT7316=m ++CONFIG_ADT7316_I2C=m ++CONFIG_AD9832=m ++CONFIG_AD9834=m ++CONFIG_AD5933=m ++CONFIG_AD2S1210=m ++CONFIG_FB_SM750=m ++CONFIG_STAGING_MEDIA=y ++CONFIG_VIDEO_MAX96712=m ++CONFIG_LTE_GDM724X=m ++CONFIG_FB_TFT=m ++CONFIG_FB_TFT_AGM1264K_FL=m ++CONFIG_FB_TFT_BD663474=m ++CONFIG_FB_TFT_HX8340BN=m ++CONFIG_FB_TFT_HX8347D=m ++CONFIG_FB_TFT_HX8353D=m ++CONFIG_FB_TFT_HX8357D=m ++CONFIG_FB_TFT_ILI9163=m ++CONFIG_FB_TFT_ILI9320=m ++CONFIG_FB_TFT_ILI9325=m ++CONFIG_FB_TFT_ILI9340=m ++CONFIG_FB_TFT_ILI9341=m ++CONFIG_FB_TFT_ILI9481=m ++CONFIG_FB_TFT_ILI9486=m ++CONFIG_FB_TFT_PCD8544=m ++CONFIG_FB_TFT_RA8875=m ++CONFIG_FB_TFT_S6D02A1=m ++CONFIG_FB_TFT_S6D1121=m ++CONFIG_FB_TFT_SEPS525=m ++CONFIG_FB_TFT_SH1106=m ++CONFIG_FB_TFT_SSD1289=m ++CONFIG_FB_TFT_SSD1305=m ++CONFIG_FB_TFT_SSD1306=m ++CONFIG_FB_TFT_SSD1331=m ++CONFIG_FB_TFT_SSD1351=m ++CONFIG_FB_TFT_ST7735R=m ++CONFIG_FB_TFT_ST7789V=m ++CONFIG_FB_TFT_TINYLCD=m ++CONFIG_FB_TFT_TLS8204=m ++CONFIG_FB_TFT_UC1611=m ++CONFIG_FB_TFT_UC1701=m ++CONFIG_FB_TFT_UPD161704=m ++CONFIG_MOST_COMPONENTS=m ++CONFIG_MOST_NET=m ++CONFIG_MOST_VIDEO=m ++CONFIG_MOST_DIM2=m ++CONFIG_MOST_I2C=m ++CONFIG_KS7010=m ++CONFIG_GREYBUS_AUDIO=m ++CONFIG_GREYBUS_AUDIO_APB_CODEC=m ++CONFIG_GREYBUS_BOOTROM=m ++CONFIG_GREYBUS_FIRMWARE=m ++CONFIG_GREYBUS_HID=m ++CONFIG_GREYBUS_LIGHT=m ++CONFIG_GREYBUS_LOG=m 
++CONFIG_GREYBUS_LOOPBACK=m ++CONFIG_GREYBUS_POWER=m ++CONFIG_GREYBUS_RAW=m ++CONFIG_GREYBUS_VIBRATOR=m ++CONFIG_GREYBUS_BRIDGED_PHY=m ++CONFIG_GREYBUS_GPIO=m ++CONFIG_GREYBUS_I2C=m ++CONFIG_GREYBUS_SDIO=m ++CONFIG_GREYBUS_SPI=m ++CONFIG_GREYBUS_UART=m ++CONFIG_GREYBUS_USB=m ++CONFIG_PI433=m ++CONFIG_XIL_AXIS_FIFO=m ++CONFIG_FIELDBUS_DEV=m ++CONFIG_HMS_ANYBUSS_BUS=m ++CONFIG_ARCX_ANYBUS_CONTROLLER=m ++CONFIG_HMS_PROFINET=m ++CONFIG_VME_BUS=y ++CONFIG_VME_TSI148=m ++CONFIG_VME_FAKE=m ++CONFIG_VME_USER=m ++CONFIG_GOLDFISH_PIPE=m ++CONFIG_COMMON_CLK_WM831X=m ++CONFIG_LMK04832=m ++CONFIG_COMMON_CLK_MAX77686=m ++CONFIG_COMMON_CLK_MAX9485=m ++CONFIG_COMMON_CLK_SI5341=m ++CONFIG_COMMON_CLK_SI5351=m ++CONFIG_COMMON_CLK_SI514=m ++CONFIG_COMMON_CLK_SI544=m ++CONFIG_COMMON_CLK_SI570=m ++CONFIG_COMMON_CLK_CDCE706=m ++CONFIG_COMMON_CLK_CDCE925=m ++CONFIG_COMMON_CLK_CS2000_CP=m ++CONFIG_COMMON_CLK_S2MPS11=m ++CONFIG_CLK_TWL6040=m ++CONFIG_COMMON_CLK_AXI_CLKGEN=m ++CONFIG_COMMON_CLK_LOCHNAGAR=m ++CONFIG_COMMON_CLK_PALMAS=m ++CONFIG_COMMON_CLK_RS9_PCIE=m ++CONFIG_COMMON_CLK_VC5=m ++CONFIG_COMMON_CLK_VC7=m ++CONFIG_COMMON_CLK_BD718XX=m ++CONFIG_COMMON_CLK_FIXED_MMIO=y ++# CONFIG_CLK_STARFIVE_JH7100 is not set ++CONFIG_XILINX_VCU=m ++CONFIG_HWSPINLOCK=y ++CONFIG_MAILBOX=y ++CONFIG_PLATFORM_MHU=m ++CONFIG_ALTERA_MBOX=m ++CONFIG_MAILBOX_TEST=m ++CONFIG_POLARFIRE_SOC_MAILBOX=m ++CONFIG_RPMSG_CHAR=m ++CONFIG_RPMSG_CTRL=m ++CONFIG_RPMSG_QCOM_GLINK_RPM=m ++CONFIG_RPMSG_VIRTIO=m ++CONFIG_SOUNDWIRE=m ++CONFIG_SOUNDWIRE_QCOM=m ++CONFIG_LITEX_SOC_CONTROLLER=m ++CONFIG_POLARFIRE_SOC_SYS_CTRL=m ++CONFIG_SIFIVE_CCACHE=y ++CONFIG_SOC_TI=y ++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y ++CONFIG_DEVFREQ_GOV_PERFORMANCE=y ++CONFIG_DEVFREQ_GOV_POWERSAVE=y ++CONFIG_DEVFREQ_GOV_USERSPACE=y ++CONFIG_DEVFREQ_GOV_PASSIVE=y ++CONFIG_PM_DEVFREQ_EVENT=y ++CONFIG_EXTCON_ADC_JACK=m ++CONFIG_EXTCON_FSA9480=m ++CONFIG_EXTCON_GPIO=m ++CONFIG_EXTCON_MAX14577=m ++CONFIG_EXTCON_MAX3355=m ++CONFIG_EXTCON_MAX77693=m ++CONFIG_EXTCON_MAX77843=m ++CONFIG_EXTCON_MAX8997=m ++CONFIG_EXTCON_PALMAS=m ++CONFIG_EXTCON_PTN5150=m ++CONFIG_EXTCON_RT8973A=m ++CONFIG_EXTCON_SM5502=m ++CONFIG_EXTCON_USB_GPIO=m ++CONFIG_EXTCON_USBC_TUSB320=m ++CONFIG_MEMORY=y ++CONFIG_FPGA_DFL_EMIF=m ++CONFIG_IIO_SW_DEVICE=m ++CONFIG_IIO_SW_TRIGGER=m ++CONFIG_ADIS16201=m ++CONFIG_ADIS16209=m ++CONFIG_ADXL313_I2C=m ++CONFIG_ADXL313_SPI=m ++CONFIG_ADXL355_I2C=m ++CONFIG_ADXL355_SPI=m ++CONFIG_ADXL367_SPI=m ++CONFIG_ADXL367_I2C=m ++CONFIG_ADXL372_SPI=m ++CONFIG_ADXL372_I2C=m ++CONFIG_BMA220=m ++CONFIG_BMA400=m ++CONFIG_BMC150_ACCEL=m ++CONFIG_BMI088_ACCEL=m ++CONFIG_DA280=m ++CONFIG_DA311=m ++CONFIG_DMARD06=m ++CONFIG_DMARD09=m ++CONFIG_DMARD10=m ++CONFIG_FXLS8962AF_I2C=m ++CONFIG_FXLS8962AF_SPI=m ++CONFIG_HID_SENSOR_ACCEL_3D=m ++CONFIG_KXSD9=m ++CONFIG_KXCJK1013=m ++CONFIG_MC3230=m ++CONFIG_MMA7455_I2C=m ++CONFIG_MMA7455_SPI=m ++CONFIG_MMA7660=m ++CONFIG_MMA8452=m ++CONFIG_MMA9551=m ++CONFIG_MMA9553=m ++CONFIG_MSA311=m ++CONFIG_MXC4005=m ++CONFIG_MXC6255=m ++CONFIG_SCA3000=m ++CONFIG_SCA3300=m ++CONFIG_STK8312=m ++CONFIG_STK8BA50=m ++CONFIG_AD7091R5=m ++CONFIG_AD7124=m ++CONFIG_AD7192=m ++CONFIG_AD7266=m ++CONFIG_AD7280=m ++CONFIG_AD7291=m ++CONFIG_AD7292=m ++CONFIG_AD7298=m ++CONFIG_AD7476=m ++CONFIG_AD7606_IFACE_PARALLEL=m ++CONFIG_AD7606_IFACE_SPI=m ++CONFIG_AD7766=m ++CONFIG_AD7768_1=m ++CONFIG_AD7780=m ++CONFIG_AD7791=m ++CONFIG_AD7793=m ++CONFIG_AD7887=m ++CONFIG_AD7923=m ++CONFIG_AD7949=m ++CONFIG_AD799X=m ++CONFIG_AD9467=m ++CONFIG_ADI_AXI_ADC=m ++CONFIG_AXP20X_ADC=m 
++CONFIG_AXP288_ADC=m ++CONFIG_CC10001_ADC=m ++CONFIG_CPCAP_ADC=m ++CONFIG_DA9150_GPADC=m ++CONFIG_DLN2_ADC=m ++CONFIG_ENVELOPE_DETECTOR=m ++CONFIG_HI8435=m ++CONFIG_HX711=m ++CONFIG_INA2XX_ADC=m ++CONFIG_LP8788_ADC=m ++CONFIG_LTC2471=m ++CONFIG_LTC2485=m ++CONFIG_LTC2496=m ++CONFIG_LTC2497=m ++CONFIG_MAX1027=m ++CONFIG_MAX11100=m ++CONFIG_MAX1118=m ++CONFIG_MAX11205=m ++CONFIG_MAX1241=m ++CONFIG_MAX1363=m ++CONFIG_MAX9611=m ++CONFIG_MCP320X=m ++CONFIG_MCP3422=m ++CONFIG_MCP3911=m ++CONFIG_MEDIATEK_MT6360_ADC=m ++CONFIG_MEN_Z188_ADC=m ++CONFIG_MP2629_ADC=m ++CONFIG_NAU7802=m ++CONFIG_PALMAS_GPADC=m ++CONFIG_QCOM_SPMI_IADC=m ++CONFIG_QCOM_SPMI_VADC=m ++CONFIG_QCOM_SPMI_ADC5=m ++CONFIG_RN5T618_ADC=m ++CONFIG_RICHTEK_RTQ6056=m ++CONFIG_SD_ADC_MODULATOR=m ++CONFIG_STMPE_ADC=m ++CONFIG_TI_ADC081C=m ++CONFIG_TI_ADC0832=m ++CONFIG_TI_ADC084S021=m ++CONFIG_TI_ADC12138=m ++CONFIG_TI_ADC108S102=m ++CONFIG_TI_ADC128S052=m ++CONFIG_TI_ADC161S626=m ++CONFIG_TI_ADS1015=m ++CONFIG_TI_ADS7950=m ++CONFIG_TI_ADS8344=m ++CONFIG_TI_ADS8688=m ++CONFIG_TI_ADS124S08=m ++CONFIG_TI_ADS131E08=m ++CONFIG_TI_TLC4541=m ++CONFIG_TI_TSC2046=m ++CONFIG_TWL4030_MADC=m ++CONFIG_TWL6030_GPADC=m ++CONFIG_VF610_ADC=m ++CONFIG_VIPERBOARD_ADC=m ++CONFIG_XILINX_XADC=m ++CONFIG_AD74413R=m ++CONFIG_IIO_RESCALE=m ++CONFIG_AD8366=m ++CONFIG_ADA4250=m ++CONFIG_HMC425=m ++CONFIG_AD7150=m ++CONFIG_AD7746=m ++CONFIG_ATLAS_PH_SENSOR=m ++CONFIG_ATLAS_EZO_SENSOR=m ++CONFIG_BME680=m ++CONFIG_CCS811=m ++CONFIG_IAQCORE=m ++CONFIG_PMS7003=m ++CONFIG_SCD30_CORE=m ++CONFIG_SCD30_I2C=m ++CONFIG_SCD30_SERIAL=m ++CONFIG_SCD4X=m ++CONFIG_SENSIRION_SGP30=m ++CONFIG_SENSIRION_SGP40=m ++CONFIG_SPS30_I2C=m ++CONFIG_SPS30_SERIAL=m ++CONFIG_SENSEAIR_SUNRISE_CO2=m ++CONFIG_VZ89X=m ++CONFIG_IIO_SSP_SENSORS_COMMONS=m ++CONFIG_IIO_SSP_SENSORHUB=m ++CONFIG_AD3552R=m ++CONFIG_AD5064=m ++CONFIG_AD5360=m ++CONFIG_AD5380=m ++CONFIG_AD5421=m ++CONFIG_AD5446=m ++CONFIG_AD5449=m ++CONFIG_AD5592R=m ++CONFIG_AD5593R=m ++CONFIG_AD5504=m ++CONFIG_AD5624R_SPI=m ++CONFIG_LTC2688=m ++CONFIG_AD5686_SPI=m ++CONFIG_AD5696_I2C=m ++CONFIG_AD5755=m ++CONFIG_AD5758=m ++CONFIG_AD5761=m ++CONFIG_AD5764=m ++CONFIG_AD5766=m ++CONFIG_AD5770R=m ++CONFIG_AD5791=m ++CONFIG_AD7293=m ++CONFIG_AD7303=m ++CONFIG_AD8801=m ++CONFIG_DPOT_DAC=m ++CONFIG_DS4424=m ++CONFIG_LTC1660=m ++CONFIG_LTC2632=m ++CONFIG_M62332=m ++CONFIG_MAX517=m ++CONFIG_MAX5821=m ++CONFIG_MCP4725=m ++CONFIG_MCP4922=m ++CONFIG_TI_DAC082S085=m ++CONFIG_TI_DAC5571=m ++CONFIG_TI_DAC7311=m ++CONFIG_TI_DAC7612=m ++CONFIG_VF610_DAC=m ++CONFIG_IIO_SIMPLE_DUMMY=m ++CONFIG_ADMV8818=m ++CONFIG_AD9523=m ++CONFIG_ADF4350=m ++CONFIG_ADF4371=m ++CONFIG_ADMV1013=m ++CONFIG_ADMV1014=m ++CONFIG_ADMV4420=m ++CONFIG_ADRF6780=m ++CONFIG_ADIS16080=m ++CONFIG_ADIS16130=m ++CONFIG_ADIS16136=m ++CONFIG_ADIS16260=m ++CONFIG_ADXRS290=m ++CONFIG_ADXRS450=m ++CONFIG_BMG160=m ++CONFIG_FXAS21002C=m ++CONFIG_HID_SENSOR_GYRO_3D=m ++CONFIG_MPU3050_I2C=m ++CONFIG_IIO_ST_GYRO_3AXIS=m ++CONFIG_ITG3200=m ++CONFIG_AFE4403=m ++CONFIG_AFE4404=m ++CONFIG_MAX30100=m ++CONFIG_MAX30102=m ++CONFIG_AM2315=m ++CONFIG_DHT11=m ++CONFIG_HDC100X=m ++CONFIG_HDC2010=m ++CONFIG_HID_SENSOR_HUMIDITY=m ++CONFIG_HTS221=m ++CONFIG_HTU21=m ++CONFIG_SI7005=m ++CONFIG_SI7020=m ++CONFIG_ADIS16400=m ++CONFIG_ADIS16460=m ++CONFIG_ADIS16475=m ++CONFIG_ADIS16480=m ++CONFIG_BMI160_I2C=m ++CONFIG_BMI160_SPI=m ++CONFIG_BOSCH_BNO055_SERIAL=m ++CONFIG_BOSCH_BNO055_I2C=m ++CONFIG_FXOS8700_I2C=m ++CONFIG_FXOS8700_SPI=m ++CONFIG_KMX61=m ++CONFIG_INV_ICM42600_I2C=m ++CONFIG_INV_ICM42600_SPI=m 
++CONFIG_INV_MPU6050_I2C=m ++CONFIG_INV_MPU6050_SPI=m ++CONFIG_IIO_ST_LSM6DSX=m ++CONFIG_IIO_ST_LSM9DS0=m ++CONFIG_ADJD_S311=m ++CONFIG_ADUX1020=m ++CONFIG_AL3010=m ++CONFIG_AL3320A=m ++CONFIG_APDS9300=m ++CONFIG_APDS9960=m ++CONFIG_AS73211=m ++CONFIG_BH1750=m ++CONFIG_BH1780=m ++CONFIG_CM32181=m ++CONFIG_CM3232=m ++CONFIG_CM3323=m ++CONFIG_CM3605=m ++CONFIG_CM36651=m ++CONFIG_GP2AP002=m ++CONFIG_GP2AP020A00F=m ++CONFIG_IQS621_ALS=m ++CONFIG_SENSORS_ISL29018=m ++CONFIG_SENSORS_ISL29028=m ++CONFIG_ISL29125=m ++CONFIG_HID_SENSOR_ALS=m ++CONFIG_HID_SENSOR_PROX=m ++CONFIG_JSA1212=m ++CONFIG_RPR0521=m ++CONFIG_SENSORS_LM3533=m ++CONFIG_LTR501=m ++CONFIG_LTRF216A=m ++CONFIG_LV0104CS=m ++CONFIG_MAX44000=m ++CONFIG_MAX44009=m ++CONFIG_NOA1305=m ++CONFIG_OPT3001=m ++CONFIG_PA12203001=m ++CONFIG_SI1133=m ++CONFIG_SI1145=m ++CONFIG_STK3310=m ++CONFIG_ST_UVIS25=m ++CONFIG_TCS3414=m ++CONFIG_TCS3472=m ++CONFIG_SENSORS_TSL2563=m ++CONFIG_TSL2583=m ++CONFIG_TSL2591=m ++CONFIG_TSL2772=m ++CONFIG_TSL4531=m ++CONFIG_US5182D=m ++CONFIG_VCNL4000=m ++CONFIG_VCNL4035=m ++CONFIG_VEML6030=m ++CONFIG_VEML6070=m ++CONFIG_VL6180=m ++CONFIG_ZOPT2201=m ++CONFIG_AK8974=m ++CONFIG_AK09911=m ++CONFIG_BMC150_MAGN_I2C=m ++CONFIG_BMC150_MAGN_SPI=m ++CONFIG_MAG3110=m ++CONFIG_HID_SENSOR_MAGNETOMETER_3D=m ++CONFIG_MMC35240=m ++CONFIG_SENSORS_HMC5843_I2C=m ++CONFIG_SENSORS_HMC5843_SPI=m ++CONFIG_SENSORS_RM3100_I2C=m ++CONFIG_SENSORS_RM3100_SPI=m ++CONFIG_YAMAHA_YAS530=m ++CONFIG_IIO_MUX=m ++CONFIG_HID_SENSOR_INCLINOMETER_3D=m ++CONFIG_HID_SENSOR_DEVICE_ROTATION=m ++CONFIG_IIO_HRTIMER_TRIGGER=m ++CONFIG_IIO_INTERRUPT_TRIGGER=m ++CONFIG_IIO_TIGHTLOOP_TRIGGER=m ++CONFIG_IIO_SYSFS_TRIGGER=m ++CONFIG_IQS624_POS=m ++CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE=m ++CONFIG_AD5110=m ++CONFIG_AD5272=m ++CONFIG_DS1803=m ++CONFIG_MAX5432=m ++CONFIG_MAX5481=m ++CONFIG_MAX5487=m ++CONFIG_MCP4018=m ++CONFIG_MCP4131=m ++CONFIG_MCP4531=m ++CONFIG_MCP41010=m ++CONFIG_TPL0102=m ++CONFIG_LMP91000=m ++CONFIG_ABP060MG=m ++CONFIG_BMP280=m ++CONFIG_DLHL60D=m ++CONFIG_DPS310=m ++CONFIG_HID_SENSOR_PRESS=m ++CONFIG_HP03=m ++CONFIG_ICP10100=m ++CONFIG_MPL115_I2C=m ++CONFIG_MPL115_SPI=m ++CONFIG_MPL3115=m ++CONFIG_MS5611=m ++CONFIG_MS5611_I2C=m ++CONFIG_MS5611_SPI=m ++CONFIG_MS5637=m ++CONFIG_IIO_ST_PRESS=m ++CONFIG_T5403=m ++CONFIG_HP206C=m ++CONFIG_ZPA2326=m ++CONFIG_AS3935=m ++CONFIG_ISL29501=m ++CONFIG_LIDAR_LITE_V2=m ++CONFIG_MB1232=m ++CONFIG_PING=m ++CONFIG_RFD77402=m ++CONFIG_SRF04=m ++CONFIG_SX9310=m ++CONFIG_SX9324=m ++CONFIG_SX9360=m ++CONFIG_SX9500=m ++CONFIG_SRF08=m ++CONFIG_VCNL3020=m ++CONFIG_VL53L0X_I2C=m ++CONFIG_AD2S90=m ++CONFIG_AD2S1200=m ++CONFIG_IQS620AT_TEMP=m ++CONFIG_LTC2983=m ++CONFIG_MAXIM_THERMOCOUPLE=m ++CONFIG_HID_SENSOR_TEMP=m ++CONFIG_MLX90614=m ++CONFIG_MLX90632=m ++CONFIG_TMP006=m ++CONFIG_TMP007=m ++CONFIG_TMP117=m ++CONFIG_TSYS01=m ++CONFIG_TSYS02D=m ++CONFIG_MAX31856=m ++CONFIG_MAX31865=m ++CONFIG_NTB=m ++CONFIG_NTB_MSI=y ++CONFIG_NTB_IDT=m ++CONFIG_NTB_EPF=m ++CONFIG_NTB_SWITCHTEC=m ++CONFIG_NTB_PINGPONG=m ++CONFIG_NTB_TOOL=m ++CONFIG_NTB_PERF=m ++CONFIG_NTB_TRANSPORT=m ++CONFIG_AL_FIC=y ++CONFIG_XILINX_INTC=y ++CONFIG_IPACK_BUS=m ++CONFIG_BOARD_TPCI200=m ++CONFIG_SERIAL_IPOCTAL=m ++CONFIG_RESET_TI_SYSCON=m ++CONFIG_RESET_TI_TPS380X=m ++# CONFIG_RESET_STARFIVE_JH7100 is not set ++CONFIG_PHY_CAN_TRANSCEIVER=m ++CONFIG_BCM_KONA_USB2_PHY=m ++CONFIG_PHY_CADENCE_TORRENT=m ++CONFIG_PHY_CADENCE_DPHY=m ++CONFIG_PHY_CADENCE_DPHY_RX=m ++CONFIG_PHY_CADENCE_SIERRA=m ++CONFIG_PHY_CADENCE_SALVO=m ++CONFIG_PHY_PXA_28NM_HSIC=m 
++CONFIG_PHY_PXA_28NM_USB2=m ++CONFIG_PHY_LAN966X_SERDES=m ++CONFIG_PHY_CPCAP_USB=m ++CONFIG_PHY_MAPPHONE_MDM6600=m ++CONFIG_PHY_OCELOT_SERDES=m ++CONFIG_PHY_QCOM_USB_HS=m ++CONFIG_PHY_QCOM_USB_HSIC=m ++CONFIG_PHY_SAMSUNG_USB2=m ++CONFIG_PHY_TUSB1210=m ++CONFIG_POWERCAP=y ++CONFIG_IDLE_INJECT=y ++CONFIG_DTPM=y ++CONFIG_MCB=m ++CONFIG_MCB_PCI=m ++CONFIG_MCB_LPC=m ++CONFIG_USB4=m ++CONFIG_LIBNVDIMM=y ++CONFIG_BLK_DEV_PMEM=m ++CONFIG_OF_PMEM=m ++CONFIG_DAX=y ++CONFIG_DEV_DAX=m ++CONFIG_NVMEM_RAVE_SP_EEPROM=m ++CONFIG_NVMEM_RMEM=m ++CONFIG_NVMEM_SPMI_SDAM=m ++CONFIG_NVMEM_U_BOOT_ENV=m ++CONFIG_STM=m ++CONFIG_STM_PROTO_BASIC=m ++CONFIG_STM_PROTO_SYS_T=m ++CONFIG_STM_DUMMY=m ++CONFIG_STM_SOURCE_CONSOLE=m ++CONFIG_STM_SOURCE_HEARTBEAT=m ++CONFIG_STM_SOURCE_FTRACE=m ++CONFIG_INTEL_TH=m ++CONFIG_INTEL_TH_PCI=m ++CONFIG_INTEL_TH_GTH=m ++CONFIG_INTEL_TH_STH=m ++CONFIG_INTEL_TH_MSU=m ++CONFIG_INTEL_TH_PTI=m ++CONFIG_FPGA=m ++CONFIG_ALTERA_PR_IP_CORE=m ++CONFIG_ALTERA_PR_IP_CORE_PLAT=m ++CONFIG_FPGA_MGR_ALTERA_PS_SPI=m ++CONFIG_FPGA_MGR_ALTERA_CVP=m ++CONFIG_FPGA_MGR_XILINX_SPI=m ++CONFIG_FPGA_MGR_ICE40_SPI=m ++CONFIG_FPGA_MGR_MACHXO2_SPI=m ++CONFIG_ALTERA_FREEZE_BRIDGE=m ++CONFIG_XILINX_PR_DECOUPLER=m ++CONFIG_OF_FPGA_REGION=m ++CONFIG_FPGA_DFL=m ++CONFIG_FPGA_DFL_FME=m ++CONFIG_FPGA_DFL_FME_MGR=m ++CONFIG_FPGA_DFL_FME_BRIDGE=m ++CONFIG_FPGA_DFL_FME_REGION=m ++CONFIG_FPGA_DFL_AFU=m ++CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m ++CONFIG_FPGA_DFL_PCI=m ++CONFIG_FPGA_MGR_MICROCHIP_SPI=m ++CONFIG_FSI=m ++CONFIG_FSI_MASTER_GPIO=m ++CONFIG_FSI_MASTER_HUB=m ++CONFIG_FSI_MASTER_ASPEED=m ++CONFIG_FSI_SCOM=m ++CONFIG_FSI_SBEFIFO=m ++CONFIG_FSI_OCC=m ++CONFIG_MUX_ADG792A=m ++CONFIG_MUX_ADGS1408=m ++CONFIG_MUX_GPIO=m ++CONFIG_MUX_MMIO=m ++CONFIG_SIOX=m ++CONFIG_SIOX_BUS_GPIO=m ++CONFIG_SLIM_QCOM_CTRL=m ++CONFIG_INTERCONNECT=y ++CONFIG_MOST=m ++CONFIG_MOST_USB_HDM=m ++CONFIG_MOST_CDEV=m ++CONFIG_MOST_SND=m ++CONFIG_PECI=m ++CONFIG_VALIDATE_FS_PARSER=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_REISERFS_FS=m ++CONFIG_REISERFS_FS_XATTR=y ++CONFIG_REISERFS_FS_POSIX_ACL=y ++CONFIG_REISERFS_FS_SECURITY=y ++CONFIG_JFS_FS=m ++CONFIG_JFS_POSIX_ACL=y ++CONFIG_JFS_SECURITY=y ++CONFIG_JFS_STATISTICS=y ++CONFIG_XFS_FS=m ++CONFIG_XFS_QUOTA=y ++CONFIG_XFS_POSIX_ACL=y ++CONFIG_XFS_RT=y ++CONFIG_GFS2_FS=m ++CONFIG_GFS2_FS_LOCKING_DLM=y ++CONFIG_OCFS2_FS=m ++CONFIG_BTRFS_FS=m ++CONFIG_BTRFS_FS_POSIX_ACL=y ++CONFIG_NILFS2_FS=m ++CONFIG_F2FS_FS=m ++CONFIG_F2FS_FS_SECURITY=y ++CONFIG_F2FS_FS_COMPRESSION=y ++# CONFIG_F2FS_IOSTAT is not set ++CONFIG_F2FS_UNFAIR_RWSEM=y ++CONFIG_ZONEFS_FS=m ++CONFIG_FS_ENCRYPTION=y ++CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y ++CONFIG_FS_VERITY=y ++CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y ++CONFIG_FANOTIFY=y ++CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y ++CONFIG_QUOTA_NETLINK_INTERFACE=y ++CONFIG_QFMT_V1=m ++CONFIG_QFMT_V2=m ++CONFIG_AUTOFS_FS=y ++CONFIG_FUSE_FS=y ++CONFIG_CUSE=m ++CONFIG_VIRTIO_FS=m ++CONFIG_OVERLAY_FS=m ++CONFIG_OVERLAY_FS_XINO_AUTO=y ++CONFIG_FSCACHE=y ++CONFIG_FSCACHE_STATS=y ++CONFIG_CACHEFILES=m ++CONFIG_CACHEFILES_ERROR_INJECTION=y ++CONFIG_ISO9660_FS=m ++CONFIG_JOLIET=y ++CONFIG_ZISOFS=y ++CONFIG_UDF_FS=m ++CONFIG_MSDOS_FS=m ++CONFIG_VFAT_FS=y ++CONFIG_EXFAT_FS=m ++CONFIG_NTFS_FS=m ++CONFIG_NTFS3_FS=m ++CONFIG_NTFS3_LZX_XPRESS=y ++CONFIG_NTFS3_FS_POSIX_ACL=y ++CONFIG_PROC_KCORE=y ++CONFIG_PROC_VMCORE_DEVICE_DUMP=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_TMPFS_INODE64=y ++CONFIG_HUGETLBFS=y ++CONFIG_EFIVAR_FS=y ++CONFIG_ORANGEFS_FS=m 
++CONFIG_ADFS_FS=m ++CONFIG_AFFS_FS=m ++CONFIG_ECRYPT_FS=y ++CONFIG_ECRYPT_FS_MESSAGING=y ++CONFIG_HFS_FS=m ++CONFIG_HFSPLUS_FS=m ++CONFIG_BEFS_FS=m ++CONFIG_BFS_FS=m ++CONFIG_EFS_FS=m ++CONFIG_JFFS2_FS=m ++CONFIG_JFFS2_FS_XATTR=y ++CONFIG_JFFS2_COMPRESSION_OPTIONS=y ++CONFIG_JFFS2_LZO=y ++CONFIG_JFFS2_CMODE_FAVOURLZO=y ++CONFIG_UBIFS_FS=m ++CONFIG_UBIFS_FS_AUTHENTICATION=y ++CONFIG_CRAMFS=m ++CONFIG_CRAMFS_MTD=y ++CONFIG_SQUASHFS=y ++CONFIG_SQUASHFS_FILE_DIRECT=y ++CONFIG_SQUASHFS_XATTR=y ++CONFIG_SQUASHFS_LZ4=y ++CONFIG_SQUASHFS_LZO=y ++CONFIG_SQUASHFS_XZ=y ++CONFIG_SQUASHFS_ZSTD=y ++CONFIG_VXFS_FS=m ++CONFIG_MINIX_FS=m ++CONFIG_OMFS_FS=m ++CONFIG_HPFS_FS=m ++CONFIG_QNX4FS_FS=m ++CONFIG_QNX6FS_FS=m ++CONFIG_ROMFS_FS=m ++CONFIG_PSTORE=y ++CONFIG_PSTORE_RAM=m ++CONFIG_PSTORE_BLK=m ++CONFIG_SYSV_FS=m ++CONFIG_UFS_FS=m ++CONFIG_EROFS_FS=m ++CONFIG_NFS_FS=m ++CONFIG_NFS_V3_ACL=y ++CONFIG_NFS_V4=m ++CONFIG_NFS_SWAP=y ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_V4_2=y ++CONFIG_NFS_V4_1_MIGRATION=y ++CONFIG_NFS_FSCACHE=y ++CONFIG_NFSD=m ++CONFIG_NFSD_V3_ACL=y ++CONFIG_NFSD_V4=y ++CONFIG_NFSD_BLOCKLAYOUT=y ++CONFIG_NFSD_SCSILAYOUT=y ++CONFIG_NFSD_FLEXFILELAYOUT=y ++CONFIG_NFSD_V4_2_INTER_SSC=y ++CONFIG_NFSD_V4_SECURITY_LABEL=y ++CONFIG_SUNRPC_DEBUG=y ++CONFIG_CEPH_FS=m ++CONFIG_CEPH_FSCACHE=y ++CONFIG_CEPH_FS_POSIX_ACL=y ++CONFIG_CEPH_FS_SECURITY_LABEL=y ++CONFIG_CIFS=m ++# CONFIG_CIFS_STATS2 is not set ++CONFIG_CIFS_UPCALL=y ++CONFIG_CIFS_XATTR=y ++CONFIG_CIFS_POSIX=y ++CONFIG_CIFS_DFS_UPCALL=y ++CONFIG_CIFS_SWN_UPCALL=y ++CONFIG_CIFS_FSCACHE=y ++CONFIG_SMB_SERVER=m ++CONFIG_SMB_SERVER_SMBDIRECT=y ++CONFIG_SMB_SERVER_KERBEROS5=y ++CONFIG_CODA_FS=m ++CONFIG_AFS_FS=m ++CONFIG_AFS_FSCACHE=y ++CONFIG_9P_FS=m ++CONFIG_9P_FSCACHE=y ++CONFIG_9P_FS_POSIX_ACL=y ++CONFIG_9P_FS_SECURITY=y ++CONFIG_NLS_DEFAULT="utf8" ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_CODEPAGE_737=m ++CONFIG_NLS_CODEPAGE_775=m ++CONFIG_NLS_CODEPAGE_850=m ++CONFIG_NLS_CODEPAGE_852=m ++CONFIG_NLS_CODEPAGE_855=m ++CONFIG_NLS_CODEPAGE_857=m ++CONFIG_NLS_CODEPAGE_860=m ++CONFIG_NLS_CODEPAGE_861=m ++CONFIG_NLS_CODEPAGE_862=m ++CONFIG_NLS_CODEPAGE_863=m ++CONFIG_NLS_CODEPAGE_864=m ++CONFIG_NLS_CODEPAGE_865=m ++CONFIG_NLS_CODEPAGE_866=m ++CONFIG_NLS_CODEPAGE_869=m ++CONFIG_NLS_CODEPAGE_936=m ++CONFIG_NLS_CODEPAGE_950=m ++CONFIG_NLS_CODEPAGE_932=m ++CONFIG_NLS_CODEPAGE_949=m ++CONFIG_NLS_CODEPAGE_874=m ++CONFIG_NLS_ISO8859_8=m ++CONFIG_NLS_CODEPAGE_1250=m ++CONFIG_NLS_CODEPAGE_1251=m ++CONFIG_NLS_ASCII=m ++CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_2=m ++CONFIG_NLS_ISO8859_3=m ++CONFIG_NLS_ISO8859_4=m ++CONFIG_NLS_ISO8859_5=m ++CONFIG_NLS_ISO8859_6=m ++CONFIG_NLS_ISO8859_7=m ++CONFIG_NLS_ISO8859_9=m ++CONFIG_NLS_ISO8859_13=m ++CONFIG_NLS_ISO8859_14=m ++CONFIG_NLS_ISO8859_15=m ++CONFIG_NLS_KOI8_R=m ++CONFIG_NLS_KOI8_U=m ++CONFIG_NLS_MAC_ROMAN=m ++CONFIG_NLS_MAC_CELTIC=m ++CONFIG_NLS_MAC_CENTEURO=m ++CONFIG_NLS_MAC_CROATIAN=m ++CONFIG_NLS_MAC_CYRILLIC=m ++CONFIG_NLS_MAC_GAELIC=m ++CONFIG_NLS_MAC_GREEK=m ++CONFIG_NLS_MAC_ICELAND=m ++CONFIG_NLS_MAC_INUIT=m ++CONFIG_NLS_MAC_ROMANIAN=m ++CONFIG_NLS_MAC_TURKISH=m ++CONFIG_DLM=m ++CONFIG_UNICODE=y ++CONFIG_KEYS_REQUEST_CACHE=y ++CONFIG_PERSISTENT_KEYRINGS=y ++CONFIG_TRUSTED_KEYS=y ++CONFIG_USER_DECRYPTED_DATA=y ++CONFIG_KEY_DH_OPERATIONS=y ++CONFIG_KEY_NOTIFICATIONS=y ++CONFIG_SECURITY_DMESG_RESTRICT=y ++CONFIG_SECURITY=y ++CONFIG_SECURITY_INFINIBAND=y ++CONFIG_SECURITY_NETWORK_XFRM=y ++CONFIG_LSM_MMAP_MIN_ADDR=0 ++CONFIG_HARDENED_USERCOPY=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SELINUX_BOOTPARAM=y 
++CONFIG_SECURITY_SMACK=y ++CONFIG_SECURITY_SMACK_NETFILTER=y ++CONFIG_SECURITY_SMACK_APPEND_SIGNALS=y ++CONFIG_SECURITY_TOMOYO=y ++CONFIG_SECURITY_APPARMOR=y ++CONFIG_SECURITY_YAMA=y ++CONFIG_SECURITY_SAFESETID=y ++CONFIG_SECURITY_LOCKDOWN_LSM=y ++CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y ++CONFIG_SECURITY_LANDLOCK=y ++CONFIG_INTEGRITY_SIGNATURE=y ++CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y ++CONFIG_IMA=y ++CONFIG_IMA_KEXEC=y ++CONFIG_IMA_DEFAULT_HASH_SHA256=y ++CONFIG_IMA_APPRAISE=y ++CONFIG_IMA_ARCH_POLICY=y ++CONFIG_IMA_APPRAISE_MODSIG=y ++CONFIG_EVM=y ++CONFIG_EVM_EXTRA_SMACK_XATTRS=y ++CONFIG_EVM_ADD_XATTRS=y ++CONFIG_DEFAULT_SECURITY_DAC=y ++CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y ++CONFIG_ZERO_CALL_USED_REGS=y ++CONFIG_CRYPTO_USER=m ++CONFIG_CRYPTO_PCRYPT=m ++CONFIG_CRYPTO_CRYPTD=m ++CONFIG_CRYPTO_TEST=m ++CONFIG_CRYPTO_ECDSA=m ++CONFIG_CRYPTO_ECRDSA=m ++CONFIG_CRYPTO_SM2=m ++CONFIG_CRYPTO_CURVE25519=m ++CONFIG_CRYPTO_AES_TI=m ++CONFIG_CRYPTO_ARIA=m ++CONFIG_CRYPTO_BLOWFISH=m ++CONFIG_CRYPTO_CAMELLIA=m ++CONFIG_CRYPTO_CAST5=m ++CONFIG_CRYPTO_CAST6=m ++CONFIG_CRYPTO_DES=m ++CONFIG_CRYPTO_SERPENT=m ++CONFIG_CRYPTO_SM4_GENERIC=m ++CONFIG_CRYPTO_TWOFISH=m ++CONFIG_CRYPTO_ADIANTUM=m ++CONFIG_CRYPTO_HCTR2=m ++CONFIG_CRYPTO_KEYWRAP=m ++CONFIG_CRYPTO_LRW=m ++CONFIG_CRYPTO_AEGIS128=m ++CONFIG_CRYPTO_CHACHA20POLY1305=m ++CONFIG_CRYPTO_GCM=y ++CONFIG_CRYPTO_SEQIV=y ++CONFIG_CRYPTO_MD4=m ++CONFIG_CRYPTO_RMD160=m ++CONFIG_CRYPTO_SM3_GENERIC=m ++CONFIG_CRYPTO_VMAC=m ++CONFIG_CRYPTO_WP512=m ++CONFIG_CRYPTO_XCBC=m ++CONFIG_CRYPTO_842=m ++CONFIG_CRYPTO_LZ4=m ++CONFIG_CRYPTO_LZ4HC=m ++CONFIG_CRYPTO_ANSI_CPRNG=m ++CONFIG_CRYPTO_DRBG_HASH=y ++CONFIG_CRYPTO_DRBG_CTR=y ++CONFIG_CRYPTO_USER_API_HASH=m ++CONFIG_CRYPTO_USER_API_SKCIPHER=m ++CONFIG_CRYPTO_USER_API_RNG=m ++CONFIG_CRYPTO_USER_API_AEAD=m ++# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set ++CONFIG_CRYPTO_STATS=y ++# CONFIG_CRYPTO_HW is not set ++CONFIG_PKCS8_PRIVATE_KEY_PARSER=m ++CONFIG_PKCS7_TEST_KEY=m ++CONFIG_SIGNED_PE_FILE_VERIFICATION=y ++CONFIG_MODULE_SIG_KEY="" ++CONFIG_XZ_DEC_MICROLZMA=y ++CONFIG_XZ_DEC_TEST=m ++CONFIG_DMA_RESTRICTED_POOL=y ++CONFIG_DMA_CMA=y ++CONFIG_CMA_SIZE_MBYTES=256 ++CONFIG_FONTS=y ++CONFIG_FONT_8x8=y ++CONFIG_FONT_ACORN_8x8=y ++CONFIG_FONT_6x10=y ++CONFIG_FONT_TER16x32=y ++CONFIG_PRINTK_TIME=y ++CONFIG_CONSOLE_LOGLEVEL_QUIET=3 ++CONFIG_BOOT_PRINTK_DELAY=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_FRAME_WARN=1024 ++CONFIG_VMLINUX_MAP=y ++CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01b6 ++CONFIG_KGDB=y ++CONFIG_KGDB_KDB=y ++CONFIG_KDB_KEYBOARD=y ++CONFIG_PAGE_POISONING=y ++CONFIG_DEBUG_WX=y ++CONFIG_SCHED_STACK_END_CHECK=y ++CONFIG_KFENCE=y ++CONFIG_KFENCE_SAMPLE_INTERVAL=0 ++CONFIG_SOFTLOCKUP_DETECTOR=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++# CONFIG_RCU_TRACE is not set ++CONFIG_LATENCYTOP=y ++CONFIG_BOOTTIME_TRACING=y ++CONFIG_FUNCTION_PROFILER=y ++CONFIG_STACK_TRACER=y ++CONFIG_IRQSOFF_TRACER=y ++CONFIG_SCHED_TRACER=y ++CONFIG_HWLAT_TRACER=y ++CONFIG_TIMERLAT_TRACER=y ++CONFIG_FTRACE_SYSCALLS=y ++CONFIG_BLK_DEV_IO_TRACE=y ++CONFIG_BPF_KPROBE_OVERRIDE=y ++CONFIG_SYNTH_EVENTS=y ++CONFIG_TRACE_EVENT_INJECT=y ++CONFIG_RV=y ++CONFIG_RV_MON_WWNR=y ++CONFIG_SAMPLES=y ++CONFIG_SAMPLE_TRACE_PRINTK=m ++CONFIG_SAMPLE_TRACE_ARRAY=m ++CONFIG_NOTIFIER_ERROR_INJECTION=m ++CONFIG_FUNCTION_ERROR_INJECTION=y ++CONFIG_TEST_BPF=m ++CONFIG_TEST_BLACKHOLE_DEV=m ++CONFIG_MEMTEST=y +diff --git a/arch/riscv/configs/k1_defconfig b/arch/riscv/configs/k1_defconfig +new file mode 100644 +index 000000000000..72df9883c25c +--- /dev/null ++++ 
b/arch/riscv/configs/k1_defconfig +@@ -0,0 +1,31 @@ ++# ++# Spacemit k1 SoC support ++# ++CONFIG_SOC_SPACEMIT=y ++CONFIG_SOC_SPACEMIT_K1=y ++CONFIG_SOC_SPACEMIT_K1X=y ++CONFIG_RISCV_ISA_ZICBOM=y ++CONFIG_SPACEMIT_K1X_CCU=y ++CONFIG_RESET_K1X_SPACEMIT=y ++CONFIG_PINCTRL_SPACEMIT_K1X=y ++CONFIG_GPIO_K1X=y ++CONFIG_SERIAL_SPACEMIT_K1X=y ++CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE=y ++CONFIG_SERIAL_DEV_BUS=y ++CONFIG_SPACEMIT_MEM_RANGE=y ++CONFIG_SPACEMIT_K1_DMA=y ++CONFIG_I2C_SPACEMIT_K1=y ++CONFIG_SPI_SPACEMIT_K1=y ++CONFIG_SPI_SPACEMIT_K1_QSPI=y ++CONFIG_PWM_PXA=m ++CONFIG_MFD_CORE=y ++CONFIG_MFD_SPACEMIT_P1=y ++CONFIG_REGULATOR_SPACEMIT_P1=y ++CONFIG_INPUT_SPACEMIT_P1_PWRKEY=m ++CONFIG_PINCTRL_SPACEMIT_P1=m ++CONFIG_RTC_DRV_SPACEMIT_P1=m ++CONFIG_SPACEMIT_P1_ADC=m ++CONFIG_MMC_SDHCI_OF_K1=y ++CONFIG_NET_VENDOR_SPACEMIT=y ++CONFIG_K1_EMAC=m ++ diff --git a/arch/riscv/configs/openeuler_defconfig b/arch/riscv/configs/openeuler_defconfig -index 61f2b2f12589..c39bbc3701b3 100644 +index 61f2b2f12589..a09cebedc1c8 100644 --- a/arch/riscv/configs/openeuler_defconfig +++ b/arch/riscv/configs/openeuler_defconfig @@ -2,6 +2,7 @@ @@ -17412,36 +28493,21 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_IRQ_FORCED_THREADING=y CONFIG_SPARSE_IRQ=y # CONFIG_GENERIC_IRQ_DEBUGFS is not set -@@ -90,11 +92,12 @@ CONFIG_BPF_JIT_DEFAULT_ON=y +@@ -90,9 +92,10 @@ CONFIG_BPF_JIT_DEFAULT_ON=y # CONFIG_BPF_SCHED is not set # end of BPF subsystem -CONFIG_PREEMPT_VOLUNTARY_BUILD=y -+CONFIG_PREEMPT_BUILD=y - # CONFIG_PREEMPT_NONE is not set +-# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y --# CONFIG_PREEMPT is not set ++# CONFIG_BPF_RVI is not set ++CONFIG_PREEMPT_NONE_BUILD=y ++CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set -+CONFIG_PREEMPT=y + # CONFIG_PREEMPT is not set CONFIG_PREEMPT_COUNT=y -+CONFIG_PREEMPTION=y # CONFIG_PREEMPT_DYNAMIC is not set - - # -@@ -123,10 +126,11 @@ CONFIG_CPU_ISOLATION=y - # RCU Subsystem - # - CONFIG_TREE_RCU=y -+CONFIG_PREEMPT_RCU=y - # CONFIG_RCU_EXPERT is not set - CONFIG_TREE_SRCU=y - CONFIG_TASKS_RCU_GENERIC=y --CONFIG_TASKS_RUDE_RCU=y -+CONFIG_TASKS_RCU=y - CONFIG_TASKS_TRACE_RCU=y - CONFIG_RCU_STALL_COMMON=y - CONFIG_RCU_NEED_SEGCBLIST=y -@@ -148,7 +152,7 @@ CONFIG_GENERIC_SCHED_CLOCK=y +@@ -148,7 +151,7 @@ CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" @@ -17450,6 +28516,14 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y +@@ -160,6 +163,7 @@ CONFIG_MEMCG=y + # CONFIG_MEMCG_V1_RECLAIM is not set + # CONFIG_MEMCG_MEMFS_INFO is not set + CONFIG_MEMCG_KMEM=y ++# CONFIG_MEMCG_KMEM_STOCK is not set + CONFIG_BLK_CGROUP=y + CONFIG_CGROUP_WRITEBACK=y + # CONFIG_CGROUP_V1_WRITEBACK is not set @@ -169,7 +173,6 @@ CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y @@ -17458,16 +28532,16 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y -@@ -226,7 +229,7 @@ CONFIG_SYSFS_SYSCALL=y - CONFIG_FHANDLE=y - CONFIG_POSIX_TIMERS=y - CONFIG_PRINTK=y --CONFIG_BUG=y -+# CONFIG_BUG is not set - CONFIG_ELF_CORE=y - CONFIG_BASE_FULL=y - CONFIG_FUTEX=y -@@ -244,6 +247,8 @@ CONFIG_KALLSYMS=y +@@ -186,6 +189,8 @@ CONFIG_SOCK_CGROUP_DATA=y + # CONFIG_CGROUP_V1_KILL is not set + # CONFIG_CGROUP_V1_STAT is not set + # CONFIG_CGROUP_FILES is not set ++# CONFIG_CGROUP_IFS is not set ++# CONFIG_UCOUNTS_PERCPU_COUNTER is not set + CONFIG_NAMESPACES=y + 
CONFIG_UTS_NS=y + CONFIG_TIME_NS=y +@@ -244,6 +249,8 @@ CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_SELFTEST is not set CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_BASE_RELATIVE=y @@ -17476,7 +28550,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_KCMP=y CONFIG_RSEQ=y CONFIG_CACHESTAT_SYSCALL=y -@@ -262,8 +267,6 @@ CONFIG_DEBUG_PERF_USE_VMALLOC=y +@@ -262,8 +269,6 @@ CONFIG_DEBUG_PERF_USE_VMALLOC=y CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y @@ -17485,7 +28559,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Kexec and crash features -@@ -288,16 +291,16 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17 +@@ -288,6 +293,7 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17 CONFIG_RISCV_SBI=y CONFIG_MMU=y CONFIG_PAGE_OFFSET=0xff60000000000000 @@ -17493,10 +28567,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_SUPPORTS_UPROBES=y - CONFIG_STACKTRACE_SUPPORT=y --CONFIG_GENERIC_BUG=y --CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y - CONFIG_GENERIC_CALIBRATE_DELAY=y +@@ -298,6 +304,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_CSUM=y CONFIG_GENERIC_HWEIGHT=y CONFIG_FIX_EARLYCON_MEM=y @@ -17504,7 +28575,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_PGTABLE_LEVELS=5 CONFIG_LOCKDEP_SUPPORT=y CONFIG_RISCV_DMA_NONCOHERENT=y -@@ -306,13 +309,15 @@ CONFIG_RISCV_DMA_NONCOHERENT=y +@@ -306,15 +313,20 @@ CONFIG_RISCV_DMA_NONCOHERENT=y # SoC selection # # CONFIG_SOC_MICROCHIP_POLARFIRE is not set @@ -17521,8 +28592,32 @@ index 61f2b2f12589..c39bbc3701b3 100644 +CONFIG_ARCH_XUANTIE=y CONFIG_ARCH_VIRT=y CONFIG_SOC_VIRT=y ++CONFIG_SOC_SPACEMIT=y ++CONFIG_SOC_SPACEMIT_K1=y ++CONFIG_SOC_SPACEMIT_K1X=y # end of SoC selection -@@ -339,7 +344,7 @@ CONFIG_ARCH_RV64I=y + + # +@@ -330,6 +342,18 @@ CONFIG_ERRATA_THEAD_CMO=y + CONFIG_ERRATA_THEAD_PMU=y + # end of CPU errata selection + ++# ++# Vendor extensions ++# ++CONFIG_RISCV_ISA_VENDOR_EXT=y ++ ++# ++# Andes ++# ++CONFIG_RISCV_ISA_VENDOR_EXT_ANDES=y ++# end of Andes ++# end of Vendor extensions ++ + # + # Platform type + # +@@ -339,7 +363,7 @@ CONFIG_ARCH_RV64I=y CONFIG_CMODEL_MEDANY=y CONFIG_MODULE_SECTIONS=y CONFIG_SMP=y @@ -17531,41 +28626,34 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_NR_CPUS=512 CONFIG_HOTPLUG_CPU=y CONFIG_TUNE_GENERIC=y -@@ -351,9 +356,8 @@ CONFIG_RISCV_ISA_C=y +@@ -351,11 +375,14 @@ CONFIG_RISCV_ISA_C=y CONFIG_RISCV_ISA_SVNAPOT=y CONFIG_RISCV_ISA_SVPBMT=y CONFIG_TOOLCHAIN_HAS_V=y -CONFIG_RISCV_ISA_V=y -CONFIG_RISCV_ISA_V_DEFAULT_ENABLE=y --CONFIG_RISCV_ISA_ZICBOM=y +# CONFIG_RISCV_ISA_V is not set -+# CONFIG_RISCV_ISA_ZICBOM is not set ++CONFIG_RISCV_ISA_ZAWRS=y ++CONFIG_TOOLCHAIN_HAS_ZBB=y ++CONFIG_RISCV_ISA_ZBB=y ++CONFIG_TOOLCHAIN_HAS_ZBC=y ++CONFIG_RISCV_ISA_ZBC=y + CONFIG_RISCV_ISA_ZICBOM=y CONFIG_RISCV_ISA_ZICBOZ=y - CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE=y +-CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE=y CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI=y -@@ -400,27 +404,17 @@ CONFIG_PORTABLE=y - # - # Power management options - # --CONFIG_SUSPEND=y --CONFIG_SUSPEND_FREEZER=y --# CONFIG_SUSPEND_SKIP_SYNC is not set --CONFIG_PM_SLEEP=y --CONFIG_PM_SLEEP_SMP=y --# CONFIG_PM_AUTOSLEEP is not set --# CONFIG_PM_USERSPACE_AUTOSLEEP is not set --# CONFIG_PM_WAKELOCKS is not set -+# CONFIG_SUSPEND is not set - CONFIG_PM=y - CONFIG_PM_DEBUG=y - # CONFIG_PM_ADVANCED_DEBUG is not set --# CONFIG_PM_TEST_SUSPEND is not set --CONFIG_PM_SLEEP_DEBUG=y - # CONFIG_DPM_WATCHDOG is not set - CONFIG_PM_CLK=y - CONFIG_PM_GENERIC_DOMAINS=y - # CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set 
--CONFIG_PM_GENERIC_DOMAINS_SLEEP=y + CONFIG_FPU=y + CONFIG_IRQ_STACKS=y +@@ -390,6 +417,7 @@ CONFIG_COMPAT=y + CONFIG_CMDLINE="" + CONFIG_EFI_STUB=y + CONFIG_EFI=y ++CONFIG_DMI=y + CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y + CONFIG_STACKPROTECTOR_PER_TASK=y + CONFIG_RISCV_ISA_FALLBACK=y +@@ -420,7 +448,7 @@ CONFIG_PM_GENERIC_DOMAINS=y + CONFIG_PM_GENERIC_DOMAINS_SLEEP=y CONFIG_PM_GENERIC_DOMAINS_OF=y CONFIG_CPU_PM=y -# CONFIG_ENERGY_MODEL is not set @@ -17573,7 +28661,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_ARCH_SUSPEND_POSSIBLE=y # end of Power management options -@@ -436,6 +430,7 @@ CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +@@ -436,6 +464,7 @@ CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y # CONFIG_CPU_IDLE_GOV_LADDER is not set CONFIG_CPU_IDLE_GOV_MENU=y CONFIG_CPU_IDLE_GOV_TEO=y @@ -17581,15 +28669,16 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DT_IDLE_STATES=y CONFIG_DT_IDLE_GENPD=y -@@ -471,6 +466,7 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +@@ -471,6 +500,8 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y # CONFIG_CPUFREQ_DT=y CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ=y ++# CONFIG_ACPI_CPPC_CPUFREQ is not set # end of CPU Frequency scaling # end of CPU Power Management -@@ -485,9 +481,44 @@ CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +@@ -485,9 +516,52 @@ CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y CONFIG_KVM_XFER_TO_GUEST_WORK=y CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y @@ -17600,15 +28689,20 @@ index 61f2b2f12589..c39bbc3701b3 100644 +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +# CONFIG_ACPI_DEBUGGER is not set -+# CONFIG_ACPI_SPCR_TABLE is not set ++CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y ++# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set ++CONFIG_ACPI_PROCESSOR_IDLE=y ++CONFIG_ACPI_MCFG=y ++CONFIG_ACPI_PROCESSOR=y +# CONFIG_ACPI_IPMI is not set ++CONFIG_ACPI_THERMAL=y +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +# CONFIG_ACPI_CONTAINER is not set @@ -17616,8 +28710,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_ACPI_NFIT is not set ++CONFIG_ACPI_NUMA=y ++# CONFIG_ACPI_HMAT is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set ++CONFIG_ACPI_PPTT=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y @@ -17636,15 +28733,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # General architecture-dependent options -@@ -498,7 +529,6 @@ CONFIG_GENERIC_ENTRY=y - CONFIG_KPROBES=y - CONFIG_JUMP_LABEL=y - # CONFIG_STATIC_KEYS_SELFTEST is not set --CONFIG_KPROBES_ON_FTRACE=y - CONFIG_UPROBES=y - CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y - CONFIG_KRETPROBES=y -@@ -524,6 +554,8 @@ CONFIG_HAVE_PERF_REGS=y +@@ -524,6 +598,8 @@ CONFIG_HAVE_PERF_REGS=y CONFIG_HAVE_PERF_USER_STACK_DUMP=y CONFIG_HAVE_ARCH_JUMP_LABEL=y CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y @@ -17653,7 +28742,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_MMU_LAZY_TLB_REFCOUNT=y CONFIG_HAVE_ARCH_SECCOMP=y CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -@@ -565,7 +597,7 @@ CONFIG_VMAP_STACK=y +@@ -565,7 +641,7 @@ CONFIG_VMAP_STACK=y CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y @@ -17662,7 +28751,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y CONFIG_ARCH_USE_MEMREMAP_PROT=y -@@ -576,7 +608,6 @@ 
CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +@@ -576,7 +652,6 @@ CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y @@ -17670,7 +28759,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # GCOV-based kernel profiling -@@ -585,6 +616,11 @@ CONFIG_DYNAMIC_SIGFRAME=y +@@ -585,6 +660,11 @@ CONFIG_DYNAMIC_SIGFRAME=y CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y # end of GCOV-based kernel profiling @@ -17682,7 +28771,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_HAVE_GCC_PLUGINS=y CONFIG_FUNCTION_ALIGNMENT=0 # end of General architecture-dependent options -@@ -646,6 +682,7 @@ CONFIG_BLK_INLINE_ENCRYPTION=y +@@ -646,6 +726,7 @@ CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y # CONFIG_BLK_DEV_DETECT_WRITING_PART0 is not set # CONFIG_BLK_DEV_WRITE_MOUNTED_DUMP is not set @@ -17690,20 +28779,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_BLK_IO_HIERARCHY_STATS is not set # -@@ -692,11 +729,7 @@ CONFIG_BFQ_GROUP_IOSCHED=y - CONFIG_PREEMPT_NOTIFIERS=y - CONFIG_PADATA=y - CONFIG_ASN1=y --CONFIG_INLINE_SPIN_UNLOCK_IRQ=y --CONFIG_INLINE_READ_UNLOCK=y --CONFIG_INLINE_READ_UNLOCK_IRQ=y --CONFIG_INLINE_WRITE_UNLOCK=y --CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -+CONFIG_UNINLINE_SPIN_UNLOCK=y - CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y - CONFIG_MUTEX_SPIN_ON_OWNER=y - CONFIG_RWSEM_SPIN_ON_OWNER=y -@@ -706,6 +739,8 @@ CONFIG_QUEUED_RWLOCKS=y +@@ -706,6 +787,8 @@ CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_MMIOWB=y CONFIG_MMIOWB=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y @@ -17712,7 +28788,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y # CONFIG_PID_MAX_PER_NAMESPACE is not set CONFIG_FREEZER=y -@@ -771,6 +806,8 @@ CONFIG_SPARSEMEM_EXTREME=y +@@ -771,6 +854,8 @@ CONFIG_SPARSEMEM_EXTREME=y CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y CONFIG_SPARSEMEM_VMEMMAP=y CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y @@ -17721,7 +28797,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_MEMORY_ISOLATION=y CONFIG_EXCLUSIVE_SYSTEM_RAM=y CONFIG_SPLIT_PTLOCK_CPUS=4 -@@ -795,13 +832,14 @@ CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +@@ -795,13 +880,14 @@ CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y # CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set CONFIG_THP_SWAP=y # CONFIG_READ_ONLY_THP_FOR_FS is not set @@ -17739,7 +28815,16 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_GENERIC_EARLY_IOREMAP=y # CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set CONFIG_PAGE_IDLE_FLAG=y -@@ -1346,10 +1384,10 @@ CONFIG_L2TP_DEBUGFS=m +@@ -833,6 +919,8 @@ CONFIG_LOCK_MM_AND_FIND_VMA=y + # + # CONFIG_DAMON is not set + # end of Data Access Monitoring ++ ++# CONFIG_THP_CONTROL is not set + # end of Memory Management options + + CONFIG_NET=y +@@ -1346,10 +1434,10 @@ CONFIG_L2TP_DEBUGFS=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m @@ -17752,7 +28837,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_BRIDGE_VLAN_FILTERING=y # CONFIG_BRIDGE_MRP is not set -@@ -1358,7 +1396,7 @@ CONFIG_BRIDGE_VLAN_FILTERING=y +@@ -1358,7 +1446,7 @@ CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y @@ -17761,7 +28846,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_LLC2 is not set # CONFIG_ATALK is not set # CONFIG_X25 is not set -@@ -1513,7 +1551,44 @@ CONFIG_CAN_BCM=m +@@ -1513,7 +1601,54 @@ CONFIG_CAN_BCM=m CONFIG_CAN_GW=m # CONFIG_CAN_J1939 is not set # CONFIG_CAN_ISOTP is not set @@ -17789,11 +28874,19 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_BT_HCIBTUSB is not set +# 
CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y ++CONFIG_BT_HCIUART_SERDEV=y +CONFIG_BT_HCIUART_H4=y ++# CONFIG_BT_HCIUART_NOKIA is not set +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set ++# CONFIG_BT_HCIUART_LL is not set ++# CONFIG_BT_HCIUART_3WIRE is not set +# CONFIG_BT_HCIUART_INTEL is not set ++# CONFIG_BT_HCIUART_BCM is not set ++# CONFIG_BT_HCIUART_RTL is not set ++# CONFIG_BT_HCIUART_QCA is not set +# CONFIG_BT_HCIUART_AG6XX is not set ++# CONFIG_BT_HCIUART_MRVL is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBCM4377 is not set +# CONFIG_BT_HCIBPA10X is not set @@ -17801,13 +28894,15 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_BT_MTKSDIO is not set ++# CONFIG_BT_MTKUART is not set +# CONFIG_BT_VIRTIO is not set ++# CONFIG_BT_NXPUART is not set +# end of Bluetooth device drivers + # CONFIG_AF_RXRPC is not set # CONFIG_AF_KCM is not set CONFIG_STREAM_PARSER=y -@@ -1522,7 +1597,7 @@ CONFIG_FIB_RULES=y +@@ -1522,7 +1657,7 @@ CONFIG_FIB_RULES=y CONFIG_WIRELESS=y CONFIG_WEXT_CORE=y CONFIG_WEXT_PROC=y @@ -17816,7 +28911,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_NL80211_TESTMODE is not set # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set # CONFIG_CFG80211_CERTIFICATION_ONUS is not set -@@ -1532,7 +1607,7 @@ CONFIG_CFG80211_DEFAULT_PS=y +@@ -1532,7 +1667,7 @@ CONFIG_CFG80211_DEFAULT_PS=y # CONFIG_CFG80211_DEBUGFS is not set CONFIG_CFG80211_CRDA_SUPPORT=y CONFIG_CFG80211_WEXT=y @@ -17825,7 +28920,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_MAC80211_HAS_RC=y CONFIG_MAC80211_RC_MINSTREL=y CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -@@ -1543,7 +1618,7 @@ CONFIG_MAC80211_DEBUGFS=y +@@ -1543,7 +1678,7 @@ CONFIG_MAC80211_DEBUGFS=y # CONFIG_MAC80211_MESSAGE_TRACING is not set # CONFIG_MAC80211_DEBUG_MENU is not set CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 @@ -17834,7 +28929,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_RFKILL_LEDS=y CONFIG_RFKILL_INPUT=y CONFIG_RFKILL_GPIO=m -@@ -1573,6 +1648,7 @@ CONFIG_FAILOVER=y +@@ -1573,6 +1708,7 @@ CONFIG_FAILOVER=y CONFIG_ETHTOOL_NETLINK=y CONFIG_NETACC_BPF=y CONFIG_NETACC_TERRACE=y @@ -17842,7 +28937,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Device Drivers -@@ -1595,6 +1671,7 @@ CONFIG_PCIEASPM_DEFAULT=y +@@ -1595,6 +1731,7 @@ CONFIG_PCIEASPM_DEFAULT=y CONFIG_PCIE_PME=y CONFIG_PCIE_DPC=y # CONFIG_PCIE_PTM is not set @@ -17850,7 +28945,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_PCI_MSI=y CONFIG_PCI_QUIRKS=y # CONFIG_PCI_DEBUG is not set -@@ -1606,6 +1683,7 @@ CONFIG_PCI_ECAM=y +@@ -1606,6 +1743,7 @@ CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y CONFIG_PCI_PASID=y @@ -17858,7 +28953,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_PCI_DYNAMIC_OF_NODES is not set # CONFIG_PCIE_BUS_TUNE_OFF is not set CONFIG_PCIE_BUS_DEFAULT=y -@@ -1615,6 +1693,7 @@ CONFIG_PCIE_BUS_DEFAULT=y +@@ -1615,6 +1753,7 @@ CONFIG_PCIE_BUS_DEFAULT=y CONFIG_VGA_ARB=y CONFIG_VGA_ARB_MAX_GPUS=64 CONFIG_HOTPLUG_PCI=y @@ -17866,7 +28961,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_HOTPLUG_PCI_CPCI is not set CONFIG_HOTPLUG_PCI_SHPC=y -@@ -1625,6 +1704,8 @@ CONFIG_HOTPLUG_PCI_SHPC=y +@@ -1625,6 +1764,8 @@ CONFIG_HOTPLUG_PCI_SHPC=y CONFIG_PCI_HOST_COMMON=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PCIE_MICROCHIP_HOST=y @@ -17875,7 +28970,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_PCIE_XILINX=y # -@@ -1636,6 +1717,7 @@ CONFIG_PCIE_CADENCE_EP=y +@@ -1636,6 +1777,7 @@ CONFIG_PCIE_CADENCE_EP=y CONFIG_PCIE_CADENCE_PLAT=y 
CONFIG_PCIE_CADENCE_PLAT_HOST=y CONFIG_PCIE_CADENCE_PLAT_EP=y @@ -17883,19 +28978,26 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_PCI_J721E=y CONFIG_PCI_J721E_HOST=y # CONFIG_PCI_J721E_EP is not set -@@ -1697,8 +1779,9 @@ CONFIG_FW_LOADER=y +@@ -1647,6 +1789,7 @@ CONFIG_PCI_J721E_HOST=y + CONFIG_PCIE_DW=y + CONFIG_PCIE_DW_HOST=y + CONFIG_PCIE_DW_EP=y ++CONFIG_PCIE_DW_SOPHGO=y + # CONFIG_PCI_MESON is not set + CONFIG_PCIE_DW_PLAT=y + CONFIG_PCIE_DW_PLAT_HOST=y +@@ -1697,7 +1840,9 @@ CONFIG_FW_LOADER=y CONFIG_FW_LOADER_DEBUG=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set -# CONFIG_FW_LOADER_COMPRESS is not set --CONFIG_FW_CACHE=y +CONFIG_FW_LOADER_COMPRESS=y +# CONFIG_FW_LOADER_COMPRESS_XZ is not set +CONFIG_FW_LOADER_COMPRESS_ZSTD=y + CONFIG_FW_CACHE=y # CONFIG_FW_UPLOAD is not set # end of Firmware loader - -@@ -1709,9 +1792,10 @@ CONFIG_WANT_DEV_COREDUMP=y +@@ -1709,10 +1854,12 @@ CONFIG_WANT_DEV_COREDUMP=y # CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set # CONFIG_TEST_ASYNC_DRIVER_PROBE is not set CONFIG_GENERIC_CPU_DEVICES=y @@ -17906,9 +29008,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y CONFIG_REGMAP_MMIO=y ++CONFIG_REGMAP_IRQ=y CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set -@@ -1724,6 +1808,8 @@ CONFIG_GENERIC_ARCH_NUMA=y + CONFIG_GENERIC_ARCH_TOPOLOGY=y +@@ -1724,6 +1871,8 @@ CONFIG_GENERIC_ARCH_NUMA=y # Bus devices # # CONFIG_MOXTET is not set @@ -17917,15 +29021,18 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_MHI_BUS is not set # CONFIG_MHI_BUS_EP is not set # end of Bus devices -@@ -1747,6 +1833,7 @@ CONFIG_PROC_EVENTS=y +@@ -1747,6 +1896,10 @@ CONFIG_PROC_EVENTS=y # end of ARM System Control and Management Interface Protocol # CONFIG_FIRMWARE_MEMMAP is not set ++CONFIG_DMIID=y ++# CONFIG_DMI_SYSFS is not set +# CONFIG_ISCSI_IBFT is not set ++# CONFIG_FW_CFG_SYSFS is not set CONFIG_SYSFB=y CONFIG_SYSFB_SIMPLEFB=y # CONFIG_GOOGLE_FIRMWARE is not set -@@ -1767,6 +1854,7 @@ CONFIG_EFI_GENERIC_STUB=y +@@ -1767,6 +1920,7 @@ CONFIG_EFI_GENERIC_STUB=y # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_EFI_DISABLE_PCI_DMA is not set CONFIG_EFI_EARLYCON=y @@ -17933,7 +29040,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_EFI_DISABLE_RUNTIME is not set # CONFIG_EFI_COCO_SECRET is not set # end of EFI (Extensible Firmware Interface) Support -@@ -1775,11 +1863,14 @@ CONFIG_EFI_EARLYCON=y +@@ -1775,11 +1929,14 @@ CONFIG_EFI_EARLYCON=y # Tegra firmware driver # # end of Tegra firmware driver @@ -17950,7 +29057,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Partition parsers -@@ -1793,9 +1884,8 @@ CONFIG_MTD_OF_PARTS=m +@@ -1793,9 +1950,8 @@ CONFIG_MTD_OF_PARTS=m # # User Modules And Translation Layers # @@ -17962,7 +29069,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
-@@ -1862,8 +1952,8 @@ CONFIG_MTD_PHYSMAP_OF=y +@@ -1862,8 +2018,8 @@ CONFIG_MTD_PHYSMAP_OF=y # CONFIG_MTD_MCHP23K256 is not set # CONFIG_MTD_MCHP48L640 is not set # CONFIG_MTD_SST25L is not set @@ -17973,7 +29080,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_MTD_MTDRAM is not set CONFIG_MTD_BLOCK2MTD=m -@@ -1876,13 +1966,15 @@ CONFIG_MTD_BLOCK2MTD=m +@@ -1876,13 +2032,15 @@ CONFIG_MTD_BLOCK2MTD=m # # NAND # @@ -17990,7 +29097,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_MTD_NAND_ECC_SW_HAMMING is not set # CONFIG_MTD_NAND_ECC_SW_BCH is not set # CONFIG_MTD_NAND_ECC_MXIC is not set -@@ -1895,12 +1987,13 @@ CONFIG_MTD_BLOCK2MTD=m +@@ -1895,12 +2053,13 @@ CONFIG_MTD_BLOCK2MTD=m # CONFIG_MTD_LPDDR is not set # end of LPDDR & LPDDR2 PCM memory drivers @@ -18006,7 +29113,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_MTD_UBI_WL_THRESHOLD=4096 CONFIG_MTD_UBI_BEB_LIMIT=20 # CONFIG_MTD_UBI_FASTMAP is not set -@@ -1921,6 +2014,13 @@ CONFIG_OF_RESOLVE=y +@@ -1921,6 +2080,13 @@ CONFIG_OF_RESOLVE=y CONFIG_OF_OVERLAY=y CONFIG_OF_NUMA=y # CONFIG_PARPORT is not set @@ -18020,7 +29127,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_BLK_DEV=y CONFIG_BLK_DEV_NULL_BLK=m CONFIG_CDROM=y -@@ -1939,7 +2039,7 @@ CONFIG_BLK_DEV_LOOP=y +@@ -1939,7 +2105,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m # CONFIG_DRBD_FAULT_INJECTION is not set @@ -18029,7 +29136,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=16384 -@@ -2008,7 +2108,7 @@ CONFIG_MISC_RTSX=m +@@ -2008,7 +2174,7 @@ CONFIG_MISC_RTSX=m # # EEPROM support # @@ -18038,7 +29145,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m -@@ -2028,7 +2128,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y +@@ -2028,7 +2194,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # CONFIG_TI_ST is not set # end of Texas Instruments shared transport line discipline @@ -18046,15 +29153,17 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SENSORS_LIS3_I2C=m CONFIG_ALTERA_STAPL=m # CONFIG_GENWQE is not set -@@ -2110,6 +2209,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +@@ -2109,7 +2274,9 @@ CONFIG_SCSI_MPT3SAS=m + CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m ++# CONFIG_SCSI_PS3STOR is not set # CONFIG_SCSI_MPI3MR is not set +# CONFIG_SCSI_LEAPIORAID is not set CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set -@@ -2156,8 +2256,11 @@ CONFIG_SCSI_DH_ALUA=y +@@ -2156,8 +2323,11 @@ CONFIG_SCSI_DH_ALUA=y CONFIG_ATA=y CONFIG_SATA_HOST=y @@ -18066,7 +29175,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SATA_PMP=y # -@@ -2168,6 +2271,7 @@ CONFIG_SATA_MOBILE_LPM_POLICY=0 +@@ -2168,6 +2338,7 @@ CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=y # CONFIG_AHCI_DWC is not set # CONFIG_AHCI_CEVA is not set @@ -18074,7 +29183,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SATA_INIC162X is not set # CONFIG_SATA_ACARD_AHCI is not set # CONFIG_SATA_SIL24 is not set -@@ -2189,6 +2293,7 @@ CONFIG_ATA_PIIX=m +@@ -2189,6 +2360,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_MV is not set # CONFIG_SATA_NV is not set # CONFIG_SATA_PROMISE is not set @@ -18082,7 +29191,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SATA_SIL is not set # CONFIG_SATA_SIS is not set # CONFIG_SATA_SVW is not set -@@ -2247,6 +2352,7 @@ CONFIG_ATA_PIIX=m +@@ -2247,6 +2419,7 @@ CONFIG_ATA_PIIX=m # # Generic fallback / legacy drivers # @@ -18090,7 +29199,7 @@ index 
61f2b2f12589..c39bbc3701b3 100644 CONFIG_ATA_GENERIC=m # CONFIG_PATA_LEGACY is not set CONFIG_MD=y -@@ -2265,14 +2371,14 @@ CONFIG_BCACHE=m +@@ -2265,14 +2438,14 @@ CONFIG_BCACHE=m # CONFIG_BCACHE_CLOSURES_DEBUG is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set CONFIG_BLK_DEV_DM_BUILTIN=y @@ -18107,7 +29216,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m -@@ -2292,6 +2398,7 @@ CONFIG_DM_MULTIPATH_ST=m +@@ -2292,6 +2465,7 @@ CONFIG_DM_MULTIPATH_ST=m # CONFIG_DM_MULTIPATH_IOA is not set CONFIG_DM_DELAY=m # CONFIG_DM_DUST is not set @@ -18115,7 +29224,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m -@@ -2322,7 +2429,7 @@ CONFIG_ISCSI_TARGET_CXGB4=m +@@ -2322,7 +2496,7 @@ CONFIG_ISCSI_TARGET_CXGB4=m # end of IEEE 1394 (FireWire) support CONFIG_NETDEVICES=y @@ -18124,7 +29233,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_NET_CORE=y CONFIG_BONDING=m CONFIG_DUMMY=m -@@ -2366,10 +2473,13 @@ CONFIG_VSOCKMON=m +@@ -2366,10 +2540,13 @@ CONFIG_VSOCKMON=m CONFIG_ETHERNET=y CONFIG_MDIO=m # CONFIG_NET_VENDOR_3COM is not set @@ -18138,7 +29247,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_ALTERA_TSE is not set CONFIG_NET_VENDOR_AMAZON=y -@@ -2406,14 +2516,13 @@ CONFIG_BNXT_DCB=y +@@ -2406,14 +2583,13 @@ CONFIG_BNXT_DCB=y # CONFIG_BNXT_HWMON is not set CONFIG_NET_VENDOR_CADENCE=y CONFIG_MACB=y @@ -18154,7 +29263,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_LIQUIDIO_CORE=m CONFIG_LIQUIDIO=m CONFIG_LIQUIDIO_VF=m -@@ -2441,7 +2550,10 @@ CONFIG_NET_VENDOR_ENGLEDER=y +@@ -2441,7 +2617,10 @@ CONFIG_NET_VENDOR_ENGLEDER=y CONFIG_NET_VENDOR_FUNGIBLE=y # CONFIG_FUN_ETH is not set CONFIG_NET_VENDOR_GOOGLE=y @@ -18165,8 +29274,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_NET_VENDOR_I825XX is not set CONFIG_NET_VENDOR_INTEL=y # CONFIG_E100 is not set -@@ -2466,6 +2578,10 @@ CONFIG_FM10K=m +@@ -2464,8 +2643,13 @@ CONFIG_ICE=m + CONFIG_ICE_SWITCHDEV=y + CONFIG_FM10K=m # CONFIG_IGC is not set ++CONFIG_NET_VENDOR_LINKDATA=y CONFIG_NET_VENDOR_MUCSE=y # CONFIG_MXGBE is not set +# CONFIG_MXGBEVF is not set @@ -18176,7 +29288,24 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_JME is not set CONFIG_NET_VENDOR_ADI=y # CONFIG_ADIN1110 is not set -@@ -2564,13 +2680,24 @@ CONFIG_SFC_MCDI_MON=y +@@ -2506,6 +2690,8 @@ CONFIG_MLXFW=m + CONFIG_NET_VENDOR_MICROSEMI=y + # CONFIG_MSCC_OCELOT_SWITCH is not set + CONFIG_NET_VENDOR_MICROSOFT=y ++CONFIG_NET_VENDOR_MOTORCOMM=y ++# CONFIG_YT6801 is not set + CONFIG_NET_VENDOR_MYRI=y + # CONFIG_MYRI10GE is not set + # CONFIG_FEALNX is not set +@@ -2539,6 +2725,7 @@ CONFIG_QED_OOO=y + # CONFIG_NET_VENDOR_BROCADE is not set + CONFIG_NET_VENDOR_QUALCOMM=y + # CONFIG_QCA7000_SPI is not set ++# CONFIG_QCA7000_UART is not set + CONFIG_QCOM_EMAC=m + # CONFIG_RMNET is not set + # CONFIG_NET_VENDOR_RDC is not set +@@ -2564,13 +2751,24 @@ CONFIG_SFC_MCDI_MON=y CONFIG_SFC_SRIOV=y CONFIG_SFC_MCDI_LOGGING=y # CONFIG_SFC_FALCON is not set @@ -18203,12 +29332,14 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_NET_VENDOR_SUN is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set -@@ -2584,8 +2711,12 @@ CONFIG_NGBE=m +@@ -2584,8 +2782,14 @@ CONFIG_NGBE=m CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set +CONFIG_NET_VENDOR_BZWX=y +# CONFIG_NCE is not set ++CONFIG_NET_VENDOR_SPACEMIT=y ++CONFIG_K1_EMAC=m +CONFIG_NET_VENDOR_NEBULA_MATRIX=y # 
CONFIG_FDDI is not set # CONFIG_HIPPI is not set @@ -18216,7 +29347,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_PHYLINK=y CONFIG_PHYLIB=y CONFIG_SWPHY=y -@@ -2661,6 +2792,7 @@ CONFIG_CAN_CALC_BITTIMING=y +@@ -2661,6 +2865,7 @@ CONFIG_CAN_CALC_BITTIMING=y # CONFIG_CAN_GRCAN is not set # CONFIG_CAN_KVASER_PCIEFD is not set CONFIG_CAN_SLCAN=m @@ -18224,7 +29355,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m CONFIG_CAN_C_CAN_PCI=m -@@ -2672,6 +2804,8 @@ CONFIG_CAN_CC770_PLATFORM=m +@@ -2672,6 +2877,8 @@ CONFIG_CAN_CC770_PLATFORM=m # CONFIG_CAN_IFI_CANFD is not set # CONFIG_CAN_M_CAN is not set # CONFIG_CAN_PEAK_PCIEFD is not set @@ -18233,7 +29364,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CAN_SJA1000=m CONFIG_CAN_EMS_PCI=m # CONFIG_CAN_F81601 is not set -@@ -2711,7 +2845,9 @@ CONFIG_MDIO_DEVICE=y +@@ -2711,7 +2918,9 @@ CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y CONFIG_FWNODE_MDIO=y CONFIG_OF_MDIO=y @@ -18243,7 +29374,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_MDIO_BITBANG=m CONFIG_MDIO_BCM_UNIMAC=m CONFIG_MDIO_CAVIUM=m -@@ -2728,6 +2864,7 @@ CONFIG_MDIO_THUNDER=m +@@ -2728,6 +2937,7 @@ CONFIG_MDIO_THUNDER=m # # MDIO Multiplexers # @@ -18251,7 +29382,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_MDIO_BUS_MUX_GPIO is not set # CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set # CONFIG_MDIO_BUS_MUX_MMIOREG is not set -@@ -2735,7 +2872,7 @@ CONFIG_MDIO_THUNDER=m +@@ -2735,7 +2945,7 @@ CONFIG_MDIO_THUNDER=m # # PCS device drivers # @@ -18260,7 +29391,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # end of PCS device drivers CONFIG_PPP=m -@@ -2768,8 +2905,8 @@ CONFIG_USB_RTL8150=m +@@ -2768,8 +2978,8 @@ CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m CONFIG_USB_USBNET=m @@ -18271,7 +29402,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_USB_NET_CDCETHER=m CONFIG_USB_NET_CDC_EEM=m CONFIG_USB_NET_CDC_NCM=m -@@ -2781,7 +2918,7 @@ CONFIG_USB_NET_SR9700=m +@@ -2781,7 +2991,7 @@ CONFIG_USB_NET_SR9700=m CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m CONFIG_USB_NET_GL620A=m @@ -18280,7 +29411,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_USB_NET_PLUSB=m CONFIG_USB_NET_MCS7830=m CONFIG_USB_NET_RNDIS_HOST=m -@@ -2865,7 +3002,39 @@ CONFIG_RT2X00_LIB_CRYPTO=y +@@ -2865,7 +3075,39 @@ CONFIG_RT2X00_LIB_CRYPTO=y CONFIG_RT2X00_LIB_LEDS=y # CONFIG_RT2X00_LIB_DEBUGFS is not set # CONFIG_RT2X00_DEBUG is not set @@ -18321,7 +29452,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_WLAN_VENDOR_RSI is not set CONFIG_WLAN_VENDOR_SILABS=y # CONFIG_WFX is not set -@@ -2876,6 +3045,10 @@ CONFIG_WLAN_VENDOR_SILABS=y +@@ -2876,6 +3118,10 @@ CONFIG_WLAN_VENDOR_SILABS=y # CONFIG_USB_NET_RNDIS_WLAN is not set # CONFIG_MAC80211_HWSIM is not set # CONFIG_VIRT_WIFI is not set @@ -18332,7 +29463,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_WAN=y CONFIG_HDLC=m CONFIG_HDLC_RAW=m -@@ -2900,6 +3073,7 @@ CONFIG_HDLC_PPP=m +@@ -2900,6 +3146,7 @@ CONFIG_HDLC_PPP=m # end of Wireless WAN # CONFIG_VMXNET3 is not set @@ -18340,7 +29471,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_USB4_NET=m # CONFIG_NETDEVSIM is not set CONFIG_NET_FAILOVER=y -@@ -2930,6 +3104,7 @@ CONFIG_INPUT_EVDEV=y +@@ -2930,6 +3177,7 @@ CONFIG_INPUT_EVDEV=y # Input Device Drivers # CONFIG_INPUT_KEYBOARD=y @@ -18348,7 +29479,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ADP5589 is not set CONFIG_KEYBOARD_ATKBD=y -@@ -2955,6 +3130,7 @@ CONFIG_KEYBOARD_GPIO=y +@@ -2955,6 +3203,7 @@ CONFIG_KEYBOARD_GPIO=y # 
CONFIG_KEYBOARD_GOLDFISH_EVENTS is not set # CONFIG_KEYBOARD_STOWAWAY is not set # CONFIG_KEYBOARD_SUNKBD is not set @@ -18356,7 +29487,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_KEYBOARD_OMAP4 is not set # CONFIG_KEYBOARD_TM2_TOUCHKEY is not set # CONFIG_KEYBOARD_XTKBD is not set -@@ -2987,7 +3163,83 @@ CONFIG_MOUSE_SYNAPTICS_I2C=m +@@ -2987,7 +3236,83 @@ CONFIG_MOUSE_SYNAPTICS_I2C=m CONFIG_MOUSE_SYNAPTICS_USB=m # CONFIG_INPUT_JOYSTICK is not set # CONFIG_INPUT_TABLET is not set @@ -18441,7 +29572,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_ATMEL_CAPTOUCH is not set -@@ -3004,7 +3256,7 @@ CONFIG_INPUT_MISC=y +@@ -3004,7 +3329,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_YEALINK is not set # CONFIG_INPUT_CM109 is not set # CONFIG_INPUT_REGULATOR_HAPTIC is not set @@ -18450,7 +29581,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_INPUT_PCF8574 is not set # CONFIG_INPUT_PWM_BEEPER is not set # CONFIG_INPUT_PWM_VIBRA is not set -@@ -3017,6 +3269,7 @@ CONFIG_INPUT_UINPUT=m +@@ -3017,9 +3342,11 @@ CONFIG_INPUT_UINPUT=m # CONFIG_INPUT_IQS626A is not set # CONFIG_INPUT_IQS7222 is not set # CONFIG_INPUT_CMA3000 is not set @@ -18458,7 +29589,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_INPUT_DRV260X_HAPTICS is not set # CONFIG_INPUT_DRV2665_HAPTICS is not set # CONFIG_INPUT_DRV2667_HAPTICS is not set -@@ -3048,6 +3301,7 @@ CONFIG_SERIO_ALTERA_PS2=m ++CONFIG_INPUT_SPACEMIT_P1_PWRKEY=m + CONFIG_RMI4_CORE=m + CONFIG_RMI4_I2C=m + CONFIG_RMI4_SPI=m +@@ -3048,6 +3375,7 @@ CONFIG_SERIO_ALTERA_PS2=m # CONFIG_SERIO_PS2MULT is not set CONFIG_SERIO_ARC_PS2=m # CONFIG_SERIO_APBPS2 is not set @@ -18466,15 +29601,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SERIO_GPIO_PS2 is not set # CONFIG_USERIO is not set # CONFIG_GAMEPORT is not set -@@ -3061,7 +3315,6 @@ CONFIG_TTY=y - CONFIG_VT=y - CONFIG_CONSOLE_TRANSLATIONS=y - CONFIG_VT_CONSOLE=y --CONFIG_VT_CONSOLE_SLEEP=y - CONFIG_HW_CONSOLE=y - CONFIG_VT_HW_CONSOLE_BINDING=y - CONFIG_UNIX98_PTYS=y -@@ -3075,6 +3328,7 @@ CONFIG_LDISC_AUTOLOAD=y +@@ -3075,6 +3403,7 @@ CONFIG_LDISC_AUTOLOAD=y CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set @@ -18482,7 +29609,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SERIAL_8250_16550A_VARIANTS=y # CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y -@@ -3082,8 +3336,8 @@ CONFIG_SERIAL_8250_DMA=y +@@ -3082,8 +3411,8 @@ CONFIG_SERIAL_8250_DMA=y CONFIG_SERIAL_8250_PCILIB=y CONFIG_SERIAL_8250_PCI=y CONFIG_SERIAL_8250_EXAR=y @@ -18493,7 +29620,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y # CONFIG_SERIAL_8250_PCI1XXXX is not set -@@ -3092,6 +3346,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y +@@ -3092,6 +3421,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_SERIAL_8250_DWLIB=y CONFIG_SERIAL_8250_DW=y @@ -18501,9 +29628,12 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SERIAL_8250_RT288X=y CONFIG_SERIAL_8250_PERICOM=y CONFIG_SERIAL_OF_PLATFORM=y -@@ -3106,6 +3361,11 @@ CONFIG_SERIAL_OF_PLATFORM=y +@@ -3105,7 +3435,14 @@ CONFIG_SERIAL_OF_PLATFORM=y + # CONFIG_SERIAL_KGDB_NMI is not set # CONFIG_SERIAL_MAX3100 is not set # CONFIG_SERIAL_MAX310X is not set ++CONFIG_SERIAL_SPACEMIT_K1X=y ++CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE=y # CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=18 @@ -18513,7 +29643,21 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SERIAL_CORE=y 
CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_CONSOLE_POLL=y -@@ -3154,6 +3414,7 @@ CONFIG_HW_RANDOM=y +@@ -3136,10 +3473,12 @@ CONFIG_N_GSM=m + # CONFIG_NULL_TTY is not set + CONFIG_HVC_DRIVER=y + # CONFIG_RPMSG_TTY is not set +-# CONFIG_SERIAL_DEV_BUS is not set ++CONFIG_SERIAL_DEV_BUS=y ++CONFIG_SERIAL_DEV_CTRL_TTYPORT=y + # CONFIG_TTY_PRINTK is not set + CONFIG_VIRTIO_CONSOLE=y + CONFIG_IPMI_HANDLER=m ++CONFIG_IPMI_DMI_DECODE=y + CONFIG_IPMI_PLAT_DATA=y + # CONFIG_IPMI_PANIC_EVENT is not set + CONFIG_IPMI_DEVICE_INTERFACE=m +@@ -3154,6 +3493,7 @@ CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIRTIO=y @@ -18521,7 +29665,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_HW_RANDOM_CCTRNG is not set # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_HW_RANDOM_JH7110 is not set -@@ -3172,7 +3433,10 @@ CONFIG_TCG_TIS_I2C_ATMEL=m +@@ -3172,7 +3512,10 @@ CONFIG_TCG_TIS_I2C_ATMEL=m CONFIG_TCG_TIS_I2C_INFINEON=m CONFIG_TCG_TIS_I2C_NUVOTON=m CONFIG_TCG_ATMEL=m @@ -18532,7 +29676,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m CONFIG_TCG_TIS_ST33ZP24_SPI=m -@@ -3184,6 +3448,7 @@ CONFIG_TCG_TIS_ST33ZP24_SPI=m +@@ -3184,6 +3527,7 @@ CONFIG_TCG_TIS_ST33ZP24_SPI=m # I2C support # CONFIG_I2C=y @@ -18540,7 +29684,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y -@@ -3221,6 +3486,7 @@ CONFIG_I2C_CCGX_UCSI=m +@@ -3221,6 +3565,7 @@ CONFIG_I2C_CCGX_UCSI=m # CONFIG_I2C_ALI15X3 is not set # CONFIG_I2C_AMD756 is not set # CONFIG_I2C_AMD8111 is not set @@ -18548,7 +29692,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_I2C_I801 is not set # CONFIG_I2C_ISCH is not set # CONFIG_I2C_PIIX4 is not set -@@ -3232,6 +3498,11 @@ CONFIG_I2C_NFORCE2=m +@@ -3229,9 +3574,15 @@ CONFIG_I2C_NFORCE2=m + # CONFIG_I2C_SIS5595 is not set + # CONFIG_I2C_SIS630 is not set + # CONFIG_I2C_SIS96X is not set ++CONFIG_I2C_SPACEMIT_K1=y # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set @@ -18560,7 +29708,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # I2C system bus drivers (mostly embedded / system-on-chip) # -@@ -3243,12 +3514,17 @@ CONFIG_I2C_DESIGNWARE_PCI=m +@@ -3243,12 +3594,17 @@ CONFIG_I2C_DESIGNWARE_PCI=m # CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_GPIO_FAULT_INJECTOR is not set @@ -18578,7 +29726,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # External I2C/SMBus adapter drivers -@@ -3290,6 +3566,7 @@ CONFIG_SPI_MEM=y +@@ -3290,6 +3646,7 @@ CONFIG_SPI_MEM=y CONFIG_SPI_CADENCE=m # CONFIG_SPI_CADENCE_QUADSPI is not set # CONFIG_SPI_CADENCE_XSPI is not set @@ -18586,7 +29734,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SPI_DESIGNWARE=y # CONFIG_SPI_DW_DMA is not set CONFIG_SPI_DW_PCI=m -@@ -3302,9 +3579,15 @@ CONFIG_SPI_DW_MMIO=y +@@ -3302,9 +3659,17 @@ CONFIG_SPI_DW_MMIO=y # CONFIG_SPI_PCI1XXXX is not set # CONFIG_SPI_PL022 is not set # CONFIG_SPI_PXA2XX is not set @@ -18597,12 +29745,14 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_SPI_SH_HSPI is not set CONFIG_SPI_SIFIVE=y # CONFIG_SPI_SN_F_OSPI is not set ++CONFIG_SPI_SPACEMIT_K1=y ++CONFIG_SPI_SPACEMIT_K1_QSPI=y +# CONFIG_SPI_SUN4I is not set +CONFIG_SPI_SUN6I=y # CONFIG_SPI_MXIC is not set # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set -@@ -3319,7 +3602,7 @@ CONFIG_SPI_SIFIVE=y +@@ -3319,7 +3684,7 @@ CONFIG_SPI_SIFIVE=y # # SPI Protocol Masters # @@ -18611,7 +29761,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SPI_LOOPBACK_TEST is not set # 
CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set -@@ -3343,14 +3626,8 @@ CONFIG_PPS_CLIENT_GPIO=m +@@ -3343,14 +3708,8 @@ CONFIG_PPS_CLIENT_GPIO=m # # PTP clock support # @@ -18627,7 +29777,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # end of PTP clock support CONFIG_PINCTRL=y -@@ -3360,6 +3637,7 @@ CONFIG_GENERIC_PINMUX_FUNCTIONS=y +@@ -3360,26 +3719,57 @@ CONFIG_GENERIC_PINMUX_FUNCTIONS=y CONFIG_PINCONF=y CONFIG_GENERIC_PINCONF=y # CONFIG_DEBUG_PINCTRL is not set @@ -18635,11 +29785,13 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_PINCTRL_CY8C95X0 is not set # CONFIG_PINCTRL_MCP23S08 is not set # CONFIG_PINCTRL_MICROCHIP_SGPIO is not set -@@ -3367,19 +3645,47 @@ CONFIG_GENERIC_PINCONF=y + # CONFIG_PINCTRL_OCELOT is not set # CONFIG_PINCTRL_SINGLE is not set ++CONFIG_PINCTRL_SPACEMIT_P1=m # CONFIG_PINCTRL_STMFX is not set # CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_TH1520=y ++CONFIG_PINCTRL_SPACEMIT_K1X=y # # Renesas pinctrl drivers @@ -18683,7 +29835,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_GPIOLIB_IRQCHIP=y # CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y -@@ -3392,6 +3698,7 @@ CONFIG_GPIO_GENERIC=y +@@ -3392,6 +3782,7 @@ CONFIG_GPIO_GENERIC=y # # CONFIG_GPIO_74XX_MMIO is not set # CONFIG_GPIO_ALTERA is not set @@ -18691,7 +29843,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_GPIO_CADENCE=m CONFIG_GPIO_DWAPB=y # CONFIG_GPIO_EXAR is not set -@@ -3402,6 +3709,7 @@ CONFIG_GPIO_GENERIC_PLATFORM=m +@@ -3402,6 +3793,7 @@ CONFIG_GPIO_GENERIC_PLATFORM=m # CONFIG_GPIO_LOGICVC is not set # CONFIG_GPIO_MB86S7X is not set # CONFIG_GPIO_PL061 is not set @@ -18699,7 +29851,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_GPIO_SIFIVE=y # CONFIG_GPIO_SYSCON is not set # CONFIG_GPIO_XILINX is not set -@@ -3417,7 +3725,8 @@ CONFIG_GPIO_SIFIVE=y +@@ -3417,7 +3809,8 @@ CONFIG_GPIO_SIFIVE=y # CONFIG_GPIO_GW_PLD is not set # CONFIG_GPIO_MAX7300 is not set # CONFIG_GPIO_MAX732X is not set @@ -18709,7 +29861,15 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_GPIO_PCA9570 is not set # CONFIG_GPIO_PCF857X is not set # CONFIG_GPIO_TPIC2810 is not set -@@ -3477,6 +3786,7 @@ CONFIG_POWER_RESET_SYSCON_POWEROFF=y +@@ -3461,6 +3854,7 @@ CONFIG_GPIO_SIFIVE=y + # CONFIG_GPIO_MOCKUP is not set + # CONFIG_GPIO_VIRTIO is not set + # CONFIG_GPIO_SIM is not set ++CONFIG_GPIO_K1X=y + # end of Virtual GPIO drivers + + # CONFIG_W1 is not set +@@ -3477,6 +3871,7 @@ CONFIG_POWER_RESET_SYSCON_POWEROFF=y CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set CONFIG_POWER_SUPPLY_HWMON=y @@ -18717,7 +29877,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_IP5XXX_POWER is not set # CONFIG_TEST_POWER is not set # CONFIG_CHARGER_ADP5061 is not set -@@ -3565,6 +3875,7 @@ CONFIG_SENSORS_G762=m +@@ -3565,6 +3960,7 @@ CONFIG_SENSORS_G762=m # CONFIG_SENSORS_HS3001 is not set CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m @@ -18725,7 +29885,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m CONFIG_SENSORS_POWR1220=m -@@ -3600,7 +3911,7 @@ CONFIG_SENSORS_MAX31790=m +@@ -3600,7 +3996,7 @@ CONFIG_SENSORS_MAX31790=m CONFIG_SENSORS_MCP3021=m # CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_TPS23861 is not set @@ -18734,7 +29894,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SENSORS_ADCXX=m CONFIG_SENSORS_LM63=m CONFIG_SENSORS_LM70=m -@@ -3620,6 +3931,7 @@ CONFIG_SENSORS_LM95241=m +@@ -3620,6 +4016,7 @@ CONFIG_SENSORS_LM95241=m CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m @@ -18742,7 +29902,7 @@ index 
61f2b2f12589..c39bbc3701b3 100644 CONFIG_SENSORS_NCT6683=m # CONFIG_SENSORS_NCT6775 is not set # CONFIG_SENSORS_NCT6775_I2C is not set -@@ -3680,7 +3992,7 @@ CONFIG_SENSORS_UCD9200=m +@@ -3680,7 +4077,7 @@ CONFIG_SENSORS_UCD9200=m # CONFIG_SENSORS_XDPE152 is not set # CONFIG_SENSORS_XDPE122 is not set CONFIG_SENSORS_ZL6100=m @@ -18751,7 +29911,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SENSORS_SBTSI is not set # CONFIG_SENSORS_SBRMI is not set CONFIG_SENSORS_SHT15=m -@@ -3733,9 +4045,14 @@ CONFIG_SENSORS_W83L785TS=m +@@ -3733,9 +4130,14 @@ CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m @@ -18767,7 +29927,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 CONFIG_THERMAL_HWMON=y CONFIG_THERMAL_OF=y -@@ -3743,41 +4060,62 @@ CONFIG_THERMAL_OF=y +@@ -3743,41 +4145,62 @@ CONFIG_THERMAL_OF=y CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y # CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set # CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set @@ -18833,15 +29993,18 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # PCI-based Watchdog Cards -@@ -3806,6 +4144,7 @@ CONFIG_BCMA_DRIVER_GPIO=y +@@ -3804,8 +4227,9 @@ CONFIG_BCMA_DRIVER_GPIO=y + # + # Multifunction device drivers # - CONFIG_MFD_CORE=m +-CONFIG_MFD_CORE=m ++CONFIG_MFD_CORE=y # CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SUN4I_GPADC is not set # CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set # CONFIG_MFD_AS3722 is not set -@@ -3877,8 +4216,8 @@ CONFIG_MFD_CORE=m +@@ -3877,8 +4301,8 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_STMPE is not set @@ -18851,7 +30014,24 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_MFD_LP3943 is not set # CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set -@@ -3999,6 +4338,7 @@ CONFIG_REGULATOR_PWM=y +@@ -3921,6 +4345,8 @@ CONFIG_MFD_SYSCON=y + # CONFIG_MFD_STMFX is not set + # CONFIG_MFD_ATC260X_I2C is not set + # CONFIG_MFD_QCOM_PM8008 is not set ++CONFIG_MFD_SPACEMIT_P1=y ++# CONFIG_RAVE_SP_CORE is not set + # CONFIG_MFD_INTEL_M10_BMC_SPI is not set + # CONFIG_MFD_RSMU_I2C is not set + # CONFIG_MFD_RSMU_SPI is not set +@@ -3987,6 +4413,7 @@ CONFIG_REGULATOR_PWM=y + # CONFIG_REGULATOR_RTQ6752 is not set + # CONFIG_REGULATOR_RTQ2208 is not set + # CONFIG_REGULATOR_SLG51000 is not set ++CONFIG_REGULATOR_SPACEMIT_P1=y + # CONFIG_REGULATOR_SY8106A is not set + # CONFIG_REGULATOR_SY8824X is not set + # CONFIG_REGULATOR_SY8827N is not set +@@ -3999,6 +4426,7 @@ CONFIG_REGULATOR_PWM=y # CONFIG_REGULATOR_TPS65132 is not set # CONFIG_REGULATOR_TPS6524X is not set # CONFIG_REGULATOR_VCTRL is not set @@ -18859,7 +30039,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_RC_CORE is not set # -@@ -4007,7 +4347,7 @@ CONFIG_REGULATOR_PWM=y +@@ -4007,7 +4435,7 @@ CONFIG_REGULATOR_PWM=y # CONFIG_MEDIA_CEC_SUPPORT is not set # end of CEC support @@ -18868,7 +30048,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_MEDIA_SUPPORT_FILTER is not set # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set -@@ -4131,10 +4471,12 @@ CONFIG_RADIO_ADAPTERS=m +@@ -4131,10 +4559,12 @@ CONFIG_RADIO_ADAPTERS=m # CONFIG_USB_RAREMONO is not set # CONFIG_RADIO_SI470X is not set CONFIG_MEDIA_PLATFORM_DRIVERS=y @@ -18883,7 +30063,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Allegro DVT media platform drivers -@@ -4173,6 +4515,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4173,6 +4603,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Marvell media platform drivers # @@ -18891,7 +30071,7 @@ 
index 61f2b2f12589..c39bbc3701b3 100644 # # Mediatek media platform drivers -@@ -4197,6 +4540,15 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4197,6 +4628,15 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Renesas media platform drivers # @@ -18907,7 +30087,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Rockchip media platform drivers -@@ -4213,6 +4565,11 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4213,6 +4653,11 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Sunxi media platform drivers # @@ -18919,7 +30099,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Texas Instruments drivers -@@ -4221,6 +4578,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4221,6 +4666,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Verisilicon media platform drivers # @@ -18927,7 +30107,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # VIA media platform drivers -@@ -4229,6 +4587,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4229,6 +4675,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Xilinx media platform drivers # @@ -18935,7 +30115,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # MMC/SDIO DVB adapters -@@ -4283,6 +4642,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y +@@ -4283,6 +4730,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_OV2659 is not set # CONFIG_VIDEO_OV2680 is not set # CONFIG_VIDEO_OV2685 is not set @@ -18943,7 +30123,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_VIDEO_OV4689 is not set # CONFIG_VIDEO_OV5640 is not set # CONFIG_VIDEO_OV5645 is not set -@@ -4304,6 +4664,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y +@@ -4304,6 +4752,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_OV9282 is not set # CONFIG_VIDEO_OV9640 is not set # CONFIG_VIDEO_OV9650 is not set @@ -18951,7 +30131,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_VIDEO_RDACM20 is not set # CONFIG_VIDEO_RDACM21 is not set # CONFIG_VIDEO_RJ54N1 is not set -@@ -4341,6 +4702,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y +@@ -4341,6 +4790,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_CS53L32A is not set # CONFIG_VIDEO_MSP3400 is not set # CONFIG_VIDEO_SONY_BTF_MPX is not set @@ -18959,7 +30139,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_VIDEO_TDA7432 is not set # CONFIG_VIDEO_TDA9840 is not set # CONFIG_VIDEO_TEA6415C is not set -@@ -4451,7 +4813,7 @@ CONFIG_CXD2880_SPI_DRV=m +@@ -4451,7 +4901,7 @@ CONFIG_CXD2880_SPI_DRV=m # CONFIG_VIDEO_GS1662 is not set # end of Media SPI Adapters @@ -18968,7 +30148,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # Customize TV tuners -@@ -4668,6 +5030,7 @@ CONFIG_DVB_SP2=m +@@ -4668,6 +5118,7 @@ CONFIG_DVB_SP2=m # Graphics support # CONFIG_APERTURE_HELPERS=y @@ -18976,7 +30156,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_VIDEO_CMDLINE=y CONFIG_VIDEO_NOMODESET=y CONFIG_AUXDISPLAY=y -@@ -4679,6 +5042,7 @@ CONFIG_AUXDISPLAY=y +@@ -4679,6 +5130,7 @@ CONFIG_AUXDISPLAY=y # CONFIG_CHARLCD_BL_ON is not set CONFIG_CHARLCD_BL_FLASH=y CONFIG_DRM=y @@ -18984,7 +30164,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_DRM_DEBUG_MM is not set CONFIG_DRM_KMS_HELPER=y # CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set -@@ -4687,7 +5051,7 @@ CONFIG_DRM_FBDEV_EMULATION=y +@@ -4687,7 +5139,7 @@ CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_FBDEV_OVERALLOC=100 # CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y @@ -18993,7 +30173,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DRM_DISPLAY_DP_HELPER=y CONFIG_DRM_DISPLAY_HDCP_HELPER=y CONFIG_DRM_DISPLAY_HDMI_HELPER=y -@@ -4720,7 +5084,7 @@ CONFIG_DRM_I2C_NXP_TDA998X=m +@@ -4720,7 +5172,7 @@ CONFIG_DRM_I2C_NXP_TDA998X=m CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m @@ -19002,7 
+30182,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y # CONFIG_DRM_AMDGPU_WERROR is not set -@@ -4735,9 +5099,13 @@ CONFIG_DRM_AMDGPU_USERPTR=y +@@ -4735,9 +5187,13 @@ CONFIG_DRM_AMDGPU_USERPTR=y # Display Engine Configuration # CONFIG_DRM_AMD_DC=y @@ -19016,7 +30196,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DRM_NOUVEAU=m CONFIG_NOUVEAU_DEBUG=5 CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -@@ -4749,6 +5117,9 @@ CONFIG_DRM_NOUVEAU_BACKLIGHT=y +@@ -4749,6 +5205,9 @@ CONFIG_DRM_NOUVEAU_BACKLIGHT=y CONFIG_DRM_UDL=m CONFIG_DRM_AST=m CONFIG_DRM_MGAG200=m @@ -19026,7 +30206,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_VIRTIO_GPU_KMS=y -@@ -4759,36 +5130,89 @@ CONFIG_DRM_PANEL=y +@@ -4759,36 +5218,89 @@ CONFIG_DRM_PANEL=y # # CONFIG_DRM_PANEL_ABT_Y030XX067A is not set # CONFIG_DRM_PANEL_ARM_VERSATILE is not set @@ -19117,7 +30297,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # end of Display Panels CONFIG_DRM_BRIDGE=y -@@ -4834,10 +5258,16 @@ CONFIG_DRM_PANEL_BRIDGE=y +@@ -4834,10 +5346,16 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_I2C_ADV7511 is not set # CONFIG_DRM_CDNS_DSI is not set # CONFIG_DRM_CDNS_MHDP8546 is not set @@ -19135,7 +30315,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_DRM_LOGICVC is not set # CONFIG_DRM_ARCPGU is not set CONFIG_DRM_BOCHS=m -@@ -4856,6 +5286,14 @@ CONFIG_DRM_CIRRUS_QEMU=m +@@ -4856,6 +5374,14 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_TINYDRM_ST7735R is not set # CONFIG_DRM_GUD is not set # CONFIG_DRM_SSD130X is not set @@ -19150,7 +30330,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -@@ -4894,6 +5332,7 @@ CONFIG_FB_RADEON_BACKLIGHT=y +@@ -4894,6 +5420,7 @@ CONFIG_FB_RADEON_BACKLIGHT=y # CONFIG_FB_ARK is not set # CONFIG_FB_PM3 is not set # CONFIG_FB_CARMINE is not set @@ -19158,7 +30338,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_FB_SMSCUFX is not set # CONFIG_FB_UDL is not set # CONFIG_FB_IBM_GXT4500 is not set -@@ -4919,6 +5358,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y +@@ -4919,6 +5446,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y # CONFIG_FB_FOREIGN_ENDIAN is not set CONFIG_FB_SYS_FOPS=y CONFIG_FB_DEFERRED_IO=y @@ -19166,7 +30346,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_FB_IOMEM_HELPERS=y CONFIG_FB_SYSMEM_HELPERS=y CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y -@@ -4946,7 +5386,7 @@ CONFIG_LCD_PLATFORM=m +@@ -4946,7 +5474,7 @@ CONFIG_LCD_PLATFORM=m CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_BACKLIGHT_KTD253 is not set # CONFIG_BACKLIGHT_KTZ8866 is not set @@ -19175,7 +30355,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_BACKLIGHT_QCOM_WLED is not set # CONFIG_BACKLIGHT_ADP8860 is not set # CONFIG_BACKLIGHT_ADP8870 is not set -@@ -4960,6 +5400,7 @@ CONFIG_BACKLIGHT_GPIO=m +@@ -4960,6 +5488,7 @@ CONFIG_BACKLIGHT_GPIO=m # CONFIG_BACKLIGHT_LED is not set # end of Backlight & LCD device support @@ -19183,7 +30363,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_HDMI=y # -@@ -4983,10 +5424,13 @@ CONFIG_LOGO_LINUX_CLUT224=y +@@ -4983,10 +5512,13 @@ CONFIG_LOGO_LINUX_CLUT224=y # end of Graphics support # CONFIG_DRM_ACCEL is not set @@ -19201,7 +30381,15 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SND_HWDEP=m CONFIG_SND_RAWMIDI=m CONFIG_SND_JACK=y -@@ -5074,6 +5518,11 @@ CONFIG_SND_HDA_INTEL=m +@@ -5010,6 +5542,7 @@ CONFIG_SND_ALOOP=m + # CONFIG_SND_PCMTEST is not set + # CONFIG_SND_MTPAV is not set + # CONFIG_SND_SERIAL_U16550 is not set ++# CONFIG_SND_SERIAL_GENERIC is not set + # CONFIG_SND_MPU401 is 
not set + CONFIG_SND_PCI=y + # CONFIG_SND_AD1889 is not set +@@ -5074,6 +5607,11 @@ CONFIG_SND_HDA_INTEL=m # CONFIG_SND_HDA_RECONFIG is not set # CONFIG_SND_HDA_INPUT_BEEP is not set # CONFIG_SND_HDA_PATCH_LOADER is not set @@ -19213,7 +30401,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SND_HDA_CODEC_REALTEK is not set # CONFIG_SND_HDA_CODEC_ANALOG is not set # CONFIG_SND_HDA_CODEC_SIGMATEL is not set -@@ -5095,7 +5544,9 @@ CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +@@ -5095,7 +5633,9 @@ CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 CONFIG_SND_HDA_CORE=m CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_PREALLOC_SIZE=64 @@ -19223,7 +30411,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_SND_SPI=y CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=m -@@ -5110,7 +5561,273 @@ CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y +@@ -5110,7 +5650,273 @@ CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y # CONFIG_SND_USB_PODHD is not set # CONFIG_SND_USB_TONEPORT is not set # CONFIG_SND_USB_VARIAX is not set @@ -19498,7 +30686,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SND_VIRTIO is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y -@@ -5195,6 +5912,7 @@ CONFIG_HID_MULTITOUCH=m +@@ -5195,6 +6001,7 @@ CONFIG_HID_MULTITOUCH=m # CONFIG_HID_NINTENDO is not set # CONFIG_HID_NTI is not set CONFIG_HID_NTRIG=y @@ -19506,7 +30694,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_HID_ORTEK=m CONFIG_HID_PANTHERLORD=m # CONFIG_PANTHERLORD_FF is not set -@@ -5261,6 +5979,7 @@ CONFIG_USB_HIDDEV=y +@@ -5261,6 +6068,7 @@ CONFIG_USB_HIDDEV=y # end of USB HID support CONFIG_I2C_HID=y @@ -19514,7 +30702,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_I2C_HID_OF is not set # CONFIG_I2C_HID_OF_ELAN is not set # CONFIG_I2C_HID_OF_GOODIX is not set -@@ -5297,6 +6016,7 @@ CONFIG_USB_XHCI_HCD=y +@@ -5297,6 +6105,7 @@ CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_PCI=y # CONFIG_USB_XHCI_PCI_RENESAS is not set CONFIG_USB_XHCI_PLATFORM=y @@ -19522,7 +30710,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y -@@ -5314,6 +6034,7 @@ CONFIG_USB_UHCI_HCD=y +@@ -5314,6 +6123,7 @@ CONFIG_USB_UHCI_HCD=y # CONFIG_USB_R8A66597_HCD is not set # CONFIG_USB_HCD_BCMA is not set # CONFIG_USB_HCD_TEST_MODE is not set @@ -19530,7 +30718,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # USB Device Class drivers -@@ -5338,8 +6059,8 @@ CONFIG_USB_STORAGE_DATAFAB=m +@@ -5338,8 +6148,8 @@ CONFIG_USB_STORAGE_DATAFAB=m CONFIG_USB_STORAGE_FREECOM=m CONFIG_USB_STORAGE_ISD200=m CONFIG_USB_STORAGE_USBAT=m @@ -19541,7 +30729,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_USB_STORAGE_JUMPSHOT=m CONFIG_USB_STORAGE_ALAUDA=m CONFIG_USB_STORAGE_ONETOUCH=m -@@ -5360,7 +6081,19 @@ CONFIG_USB_MICROTEK=m +@@ -5360,7 +6170,19 @@ CONFIG_USB_MICROTEK=m # # CONFIG_USB_CDNS_SUPPORT is not set # CONFIG_USB_MUSB_HDRC is not set @@ -19562,7 +30750,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_USB_DWC2 is not set # CONFIG_USB_CHIPIDEA is not set # CONFIG_USB_ISP1760 is not set -@@ -5452,7 +6185,7 @@ CONFIG_USB_HSIC_USB3503=m +@@ -5452,7 +6274,7 @@ CONFIG_USB_HSIC_USB3503=m # CONFIG_USB_HSIC_USB4604 is not set # CONFIG_USB_LINK_LAYER_TEST is not set CONFIG_USB_CHAOSKEY=m @@ -19571,7 +30759,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_USB_ATM=m # CONFIG_USB_SPEEDTOUCH is not set CONFIG_USB_CXACRU=m -@@ -5467,7 +6200,101 @@ CONFIG_USB_XUSBATM=m +@@ -5467,7 +6289,101 @@ CONFIG_USB_XUSBATM=m # CONFIG_USB_ISP1301 is not set # end of USB Physical Layer drivers @@ -19674,7 +30862,7 @@ index 61f2b2f12589..c39bbc3701b3 
100644 CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m -@@ -5476,6 +6303,7 @@ CONFIG_TYPEC_TCPCI=m +@@ -5476,6 +6392,7 @@ CONFIG_TYPEC_TCPCI=m # CONFIG_TYPEC_FUSB302 is not set CONFIG_TYPEC_UCSI=m # CONFIG_UCSI_CCG is not set @@ -19682,7 +30870,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_UCSI_STM32G0 is not set # CONFIG_TYPEC_TPS6598X is not set # CONFIG_TYPEC_ANX7411 is not set -@@ -5500,7 +6328,7 @@ CONFIG_TYPEC_DP_ALTMODE=m +@@ -5500,7 +6417,7 @@ CONFIG_TYPEC_DP_ALTMODE=m # CONFIG_TYPEC_NVIDIA_ALTMODE is not set # end of USB Type-C Alternate Mode drivers @@ -19691,7 +30879,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_MMC=y CONFIG_PWRSEQ_EMMC=m CONFIG_PWRSEQ_SIMPLE=m -@@ -5519,15 +6347,18 @@ CONFIG_MMC_SDHCI=y +@@ -5519,15 +6436,19 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_IO_ACCESSORS=y CONFIG_MMC_SDHCI_PCI=m CONFIG_MMC_RICOH_MMC=y @@ -19701,6 +30889,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +CONFIG_MMC_SDHCI_OF_DWCMSHC=y ++CONFIG_MMC_SDHCI_OF_K1=y CONFIG_MMC_SDHCI_CADENCE=y # CONFIG_MMC_SDHCI_F_SDH30 is not set # CONFIG_MMC_SDHCI_MILBEAUT is not set @@ -19711,7 +30900,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_DW=m -@@ -5538,18 +6369,18 @@ CONFIG_MMC_DW_BLUEFIELD=m +@@ -5538,18 +6459,18 @@ CONFIG_MMC_DW_BLUEFIELD=m # CONFIG_MMC_DW_K3 is not set CONFIG_MMC_DW_PCI=m # CONFIG_MMC_DW_STARFIVE is not set @@ -19732,7 +30921,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m # CONFIG_MEMSTICK_DEBUG is not set -@@ -5590,7 +6421,7 @@ CONFIG_LEDS_LM3530=m +@@ -5590,7 +6511,7 @@ CONFIG_LEDS_LM3530=m # CONFIG_LEDS_LM3642 is not set # CONFIG_LEDS_LM3692X is not set # CONFIG_LEDS_PCA9532 is not set @@ -19741,7 +30930,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_LEDS_LP3944=m # CONFIG_LEDS_LP3952 is not set # CONFIG_LEDS_LP50XX is not set -@@ -5672,6 +6503,7 @@ CONFIG_INFINIBAND_USER_MEM=y +@@ -5672,6 +6593,7 @@ CONFIG_INFINIBAND_USER_MEM=y CONFIG_INFINIBAND_ON_DEMAND_PAGING=y CONFIG_INFINIBAND_ADDR_TRANS=y CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y @@ -19749,7 +30938,15 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_INFINIBAND_VIRT_DMA=y CONFIG_INFINIBAND_BNXT_RE=m CONFIG_INFINIBAND_CXGB4=m -@@ -5804,21 +6636,28 @@ CONFIG_RTC_DRV_M48T35=m +@@ -5753,6 +6675,7 @@ CONFIG_RTC_DRV_EM3027=m + # CONFIG_RTC_DRV_RV3032 is not set + CONFIG_RTC_DRV_RV8803=m + # CONFIG_RTC_DRV_SD3078 is not set ++CONFIG_RTC_DRV_SPACEMIT_P1=m + + # + # SPI RTC drivers +@@ -5804,21 +6727,28 @@ CONFIG_RTC_DRV_M48T35=m CONFIG_RTC_DRV_M48T59=m CONFIG_RTC_DRV_MSM6242=m CONFIG_RTC_DRV_RP5C01=m @@ -19778,7 +30975,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DMADEVICES=y # CONFIG_DMADEVICES_DEBUG is not set -@@ -5826,10 +6665,13 @@ CONFIG_DMADEVICES=y +@@ -5826,12 +6756,16 @@ CONFIG_DMADEVICES=y # DMA Devices # CONFIG_DMA_ENGINE=y @@ -19792,8 +30989,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 +CONFIG_DW_AXI_DMAC=y # CONFIG_FSL_EDMA is not set # CONFIG_INTEL_IDMA64 is not set ++CONFIG_SPACEMIT_K1_DMA=y # CONFIG_PL330_DMA is not set -@@ -5844,22 +6686,25 @@ CONFIG_DW_DMAC=m + # CONFIG_PLX_DMA is not set + # CONFIG_XILINX_DMA is not set +@@ -5844,6 +6778,8 @@ CONFIG_DW_DMAC=m CONFIG_DW_DMAC_PCI=m # CONFIG_DW_EDMA is not set # CONFIG_SF_PDMA is not set @@ -19802,13 +31002,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # DMA Clients - # - CONFIG_ASYNC_TX_DMA=y --# CONFIG_DMATEST is not set -+CONFIG_DMATEST=y 
-+CONFIG_DMA_ENGINE_RAID=y - - # +@@ -5855,11 +6791,11 @@ CONFIG_ASYNC_TX_DMA=y # DMABUF options # CONFIG_SYNC_FILE=y @@ -19823,7 +31017,15 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_DMABUF_HEAPS is not set # CONFIG_DMABUF_SYSFS_STATS is not set # end of DMABUF options -@@ -5948,6 +6793,8 @@ CONFIG_COMMON_CLK=y +@@ -5879,6 +6815,7 @@ CONFIG_VFIO_GROUP=y + CONFIG_VFIO_CONTAINER=y + CONFIG_VFIO_NOIOMMU=y + CONFIG_VFIO_VIRQFD=y ++# CONFIG_VFIO_DEBUGFS is not set + + # + # VFIO support for PCI devices +@@ -5948,8 +6885,11 @@ CONFIG_COMMON_CLK=y # CONFIG_COMMON_CLK_VC7 is not set # CONFIG_COMMON_CLK_FIXED_MMIO is not set CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC=y @@ -19831,8 +31033,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_CLK_RCAR_USB2_CLOCK_SEL is not set CONFIG_CLK_SIFIVE=y CONFIG_CLK_SIFIVE_PRCI=y ++CONFIG_SPACEMIT_K1X_CCU=y CONFIG_CLK_STARFIVE_JH71X0=y -@@ -5959,15 +6806,27 @@ CONFIG_CLK_STARFIVE_JH7110_AON=m + CONFIG_CLK_STARFIVE_JH7100=y + CONFIG_CLK_STARFIVE_JH7100_AUDIO=m +@@ -5959,15 +6899,27 @@ CONFIG_CLK_STARFIVE_JH7110_AON=m CONFIG_CLK_STARFIVE_JH7110_STG=m CONFIG_CLK_STARFIVE_JH7110_ISP=m CONFIG_CLK_STARFIVE_JH7110_VOUT=m @@ -19860,7 +31065,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_RISCV_TIMER=y # end of Clock Source drivers -@@ -5976,8 +6835,11 @@ CONFIG_MAILBOX=y +@@ -5976,8 +6928,11 @@ CONFIG_MAILBOX=y # CONFIG_ARM_MHU_V2 is not set # CONFIG_PLATFORM_MHU is not set # CONFIG_PL320_MBOX is not set @@ -19872,15 +31077,17 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y -@@ -5992,6 +6854,7 @@ CONFIG_IOMMU_DEFAULT_DMA_LAZY=y +@@ -5992,6 +6947,9 @@ CONFIG_IOMMU_DEFAULT_DMA_LAZY=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_OF_IOMMU=y # CONFIG_IOMMUFD is not set ++CONFIG_RISCV_IOMMU=y ++CONFIG_RISCV_IOMMU_PCI=y +CONFIG_SUN50I_IOMMU=y # # Remoteproc drivers -@@ -6007,6 +6870,7 @@ CONFIG_RPMSG_CHAR=y +@@ -6007,6 +6965,7 @@ CONFIG_RPMSG_CHAR=y CONFIG_RPMSG_CTRL=y CONFIG_RPMSG_NS=y # CONFIG_RPMSG_QCOM_GLINK_RPM is not set @@ -19888,12 +31095,19 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_RPMSG_VIRTIO=y # end of Rpmsg drivers -@@ -6055,22 +6919,65 @@ CONFIG_RPMSG_VIRTIO=y +@@ -6055,22 +7014,72 @@ CONFIG_RPMSG_VIRTIO=y # CONFIG_QCOM_PMIC_GLINK is not set # end of Qualcomm SoC drivers +CONFIG_SOC_RENESAS=y CONFIG_SIFIVE_CCACHE=y ++ ++# ++# Spacemit SoC drivers ++# ++CONFIG_SPACEMIT_MEM_RANGE=y ++# end of Spacemit SoC drivers ++ CONFIG_JH71XX_PMU=y +CONFIG_SUNXI_SRAM=y +# CONFIG_SUN20I_PPU is not set @@ -19955,7 +31169,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_EXTCON_FSA9480 is not set CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_MAX3355 is not set -@@ -6080,7 +6987,536 @@ CONFIG_EXTCON_GPIO=m +@@ -6080,7 +7089,540 @@ CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_USB_GPIO is not set # CONFIG_EXTCON_USBC_TUSB320 is not set # CONFIG_MEMORY is not set @@ -20065,6 +31279,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_MCP3911 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set ++CONFIG_SPACEMIT_P1_ADC=m +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_SUN20I_GPADC is not set +# CONFIG_TI_ADC081C is not set @@ -20126,11 +31341,13 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set ++# CONFIG_PMS7003 is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set ++# CONFIG_SPS30_SERIAL is not set +# 
CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors @@ -20286,6 +31503,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set ++# CONFIG_BOSCH_BNO055_SERIAL is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set @@ -20493,10 +31711,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_NTB is not set CONFIG_PWM=y CONFIG_PWM_SYSFS=y -@@ -6090,7 +7526,11 @@ CONFIG_PWM_SYSFS=y +@@ -6090,7 +7632,12 @@ CONFIG_PWM_SYSFS=y # CONFIG_PWM_DWC is not set # CONFIG_PWM_FSL_FTM is not set # CONFIG_PWM_PCA9685 is not set ++CONFIG_PWM_PXA=m +# CONFIG_PWM_RCAR is not set +# CONFIG_PWM_RENESAS_TPU is not set CONFIG_PWM_SIFIVE=m @@ -20505,15 +31724,18 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_PWM_XILINX is not set # -@@ -6100,12 +7540,18 @@ CONFIG_IRQCHIP=y +@@ -6099,15 +7646,24 @@ CONFIG_PWM_SIFIVE=m + CONFIG_IRQCHIP=y # CONFIG_AL_FIC is not set # CONFIG_XILINX_INTC is not set ++CONFIG_SOPHGO_SG2044_MSI=y CONFIG_RISCV_INTC=y +CONFIG_RISCV_APLIC=y +CONFIG_RISCV_APLIC_MSI=y +CONFIG_RISCV_IMSIC=y +CONFIG_RISCV_IMSIC_PCI=y CONFIG_SIFIVE_PLIC=y ++CONFIG_THEAD_C900_ACLINT_SSWI=y # end of IRQ chip support # CONFIG_IPACK_BUS is not set @@ -20523,8 +31745,11 @@ index 61f2b2f12589..c39bbc3701b3 100644 +CONFIG_RESET_TH1520=y # CONFIG_RESET_TI_SYSCON is not set # CONFIG_RESET_TI_TPS380X is not set ++CONFIG_RESET_K1X_SPACEMIT=y CONFIG_RESET_STARFIVE_JH71X0=y -@@ -6116,7 +7562,12 @@ CONFIG_RESET_STARFIVE_JH7110=y + CONFIG_RESET_STARFIVE_JH7100=y + CONFIG_RESET_STARFIVE_JH7110=y +@@ -6116,7 +7672,12 @@ CONFIG_RESET_STARFIVE_JH7110=y # PHY Subsystem # CONFIG_GENERIC_PHY=y @@ -20537,7 +31762,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # PHY drivers for Broadcom platforms -@@ -6132,14 +7583,21 @@ CONFIG_GENERIC_PHY=y +@@ -6132,14 +7693,21 @@ CONFIG_GENERIC_PHY=y # CONFIG_PHY_PXA_28NM_HSIC is not set # CONFIG_PHY_PXA_28NM_USB2 is not set # CONFIG_PHY_LAN966X_SERDES is not set @@ -20559,7 +31784,15 @@ index 61f2b2f12589..c39bbc3701b3 100644 # end of PHY Subsystem # CONFIG_POWERCAP is not set -@@ -6191,7 +7649,9 @@ CONFIG_NVMEM_SYSFS=y +@@ -6151,6 +7719,7 @@ CONFIG_GENERIC_PHY=y + CONFIG_RISCV_PMU=y + CONFIG_RISCV_PMU_LEGACY=y + CONFIG_RISCV_PMU_SBI=y ++CONFIG_ANDES_CUSTOM_PMU=y + # end of Performance monitor support + + CONFIG_RAS=y +@@ -6191,7 +7760,9 @@ CONFIG_NVMEM_SYSFS=y # end of Layout Types # CONFIG_NVMEM_RMEM is not set @@ -20569,7 +31802,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # # HW tracing support -@@ -6202,6 +7662,8 @@ CONFIG_NVMEM_SYSFS=y +@@ -6202,6 +7773,8 @@ CONFIG_NVMEM_SYSFS=y # CONFIG_FPGA is not set # CONFIG_FSI is not set @@ -20578,7 +31811,15 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_PM_OPP=y # CONFIG_SIOX is not set # CONFIG_SLIMBUS is not set -@@ -6235,6 +7697,7 @@ CONFIG_EXT4_FS_POSIX_ACL=y +@@ -6223,6 +7796,7 @@ CONFIG_INTERCONNECT=y + # + CONFIG_VALIDATE_FS_PARSER=y + CONFIG_FS_IOMAP=y ++CONFIG_FS_STACK=y + CONFIG_BUFFER_HEAD=y + CONFIG_LEGACY_DIRECT_IO=y + # CONFIG_EXT2_FS is not set +@@ -6235,6 +7809,7 @@ CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y # CONFIG_EXT4_DEBUG is not set # CONFIG_EXT4_ERROR_REPORT is not set @@ -20586,7 +31827,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_JBD2=y # CONFIG_JBD2_DEBUG is not set CONFIG_FS_MBCACHE=y -@@ -6289,10 +7752,10 @@ CONFIG_QUOTA_TREE=y +@@ -6289,10 +7864,11 @@ CONFIG_QUOTA_TREE=y CONFIG_QFMT_V2=y CONFIG_QUOTACTL=y 
CONFIG_AUTOFS_FS=y @@ -20595,11 +31836,23 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CUSE=m CONFIG_VIRTIO_FS=m -CONFIG_OVERLAY_FS=m ++CONFIG_FUSE_PASSTHROUGH=y +CONFIG_OVERLAY_FS=y # CONFIG_OVERLAY_FS_REDIRECT_DIR is not set CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y # CONFIG_OVERLAY_FS_INDEX is not set -@@ -6382,8 +7845,24 @@ CONFIG_MISC_FILESYSTEMS=y +@@ -6363,9 +7939,9 @@ CONFIG_TMPFS_XATTR=y + # CONFIG_TMPFS_QUOTA is not set + CONFIG_ARCH_SUPPORTS_HUGETLBFS=y + CONFIG_HUGETLBFS=y ++# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set + CONFIG_HUGETLB_PAGE=y + CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +-# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set + # CONFIG_HUGETLB_ALLOC_LIMIT is not set + CONFIG_ARCH_HAS_GIGANTIC_PAGE=y + CONFIG_CONFIGFS_FS=y +@@ -6382,8 +7958,24 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_BEFS_FS is not set # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set @@ -20626,15 +31879,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CRAMFS=m CONFIG_CRAMFS_BLOCKDEV=y # CONFIG_CRAMFS_MTD is not set -@@ -6416,7 +7895,6 @@ CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 - CONFIG_PSTORE_COMPRESS=y - # CONFIG_PSTORE_CONSOLE is not set - # CONFIG_PSTORE_PMSG is not set --# CONFIG_PSTORE_FTRACE is not set - CONFIG_PSTORE_RAM=m - # CONFIG_PSTORE_BLK is not set - # CONFIG_SYSV_FS is not set -@@ -6432,7 +7910,7 @@ CONFIG_NFS_V4=y +@@ -6432,7 +8024,7 @@ CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y CONFIG_PNFS_FILE_LAYOUT=y @@ -20643,7 +31888,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_PNFS_FLEXFILE_LAYOUT=m CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" # CONFIG_NFS_V4_1_MIGRATION is not set -@@ -6518,7 +7996,7 @@ CONFIG_NLS_ISO8859_8=m +@@ -6518,7 +8110,7 @@ CONFIG_NLS_ISO8859_8=m CONFIG_NLS_CODEPAGE_1250=m CONFIG_NLS_CODEPAGE_1251=m CONFIG_NLS_ASCII=y @@ -20652,7 +31897,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_NLS_ISO8859_2=m CONFIG_NLS_ISO8859_3=m CONFIG_NLS_ISO8859_4=m -@@ -6557,6 +8035,7 @@ CONFIG_KEYS=y +@@ -6557,6 +8149,7 @@ CONFIG_KEYS=y CONFIG_PERSISTENT_KEYRINGS=y CONFIG_TRUSTED_KEYS=y CONFIG_TRUSTED_KEYS_TPM=y @@ -20660,7 +31905,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_ENCRYPTED_KEYS=y # CONFIG_USER_DECRYPTED_DATA is not set # CONFIG_KEY_DH_OPERATIONS is not set -@@ -6635,6 +8114,7 @@ CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +@@ -6635,6 +8228,7 @@ CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y # CONFIG_IMA_DISABLE_HTABLE is not set # CONFIG_IMA_DIGEST_LIST is not set @@ -20668,7 +31913,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_EVM=y # CONFIG_EVM_DEFAULT_HASH_SHA1 is not set CONFIG_EVM_DEFAULT_HASH_SHA256=y -@@ -6657,6 +8137,8 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,appar +@@ -6657,6 +8251,8 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,appar # Memory initialization # CONFIG_INIT_STACK_NONE=y @@ -20677,7 +31922,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set # CONFIG_INIT_ON_FREE_DEFAULT_ON is not set # CONFIG_ZERO_CALL_USED_REGS is not set -@@ -6671,8 +8153,6 @@ CONFIG_LIST_HARDENED=y +@@ -6671,8 +8267,6 @@ CONFIG_LIST_HARDENED=y CONFIG_RANDSTRUCT_NONE=y # end of Kernel hardening options @@ -20686,7 +31931,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # end of Security options CONFIG_XOR_BLOCKS=m -@@ -6693,6 +8173,7 @@ CONFIG_CRYPTO_ALGAPI=y +@@ -6693,6 +8287,7 @@ CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y CONFIG_CRYPTO_AEAD2=y @@ -20694,7 +31939,7 
@@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CRYPTO_SIG2=y CONFIG_CRYPTO_SKCIPHER=y CONFIG_CRYPTO_SKCIPHER2=y -@@ -6704,18 +8185,18 @@ CONFIG_CRYPTO_RNG_DEFAULT=y +@@ -6704,18 +8299,18 @@ CONFIG_CRYPTO_RNG_DEFAULT=y CONFIG_CRYPTO_AKCIPHER2=y CONFIG_CRYPTO_AKCIPHER=y CONFIG_CRYPTO_KPP2=y @@ -20716,7 +31961,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_ENGINE=y # end of Crypto core or helper -@@ -6724,14 +8205,14 @@ CONFIG_CRYPTO_ENGINE=y +@@ -6724,14 +8319,14 @@ CONFIG_CRYPTO_ENGINE=y # Public-key cryptography # CONFIG_CRYPTO_RSA=y @@ -20735,7 +31980,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # end of Public-key cryptography # -@@ -6747,7 +8228,7 @@ CONFIG_CRYPTO_CAMELLIA=m +@@ -6747,7 +8342,7 @@ CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST_COMMON=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m @@ -20744,7 +31989,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m -@@ -6764,7 +8245,7 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m +@@ -6764,7 +8359,7 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m # # CONFIG_CRYPTO_ADIANTUM is not set CONFIG_CRYPTO_ARC4=m @@ -20753,7 +31998,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CRYPTO_CBC=y # CONFIG_CRYPTO_CFB is not set CONFIG_CRYPTO_CTR=y -@@ -6773,35 +8254,35 @@ CONFIG_CRYPTO_ECB=y +@@ -6773,35 +8368,35 @@ CONFIG_CRYPTO_ECB=y # CONFIG_CRYPTO_HCTR2 is not set # CONFIG_CRYPTO_KEYWRAP is not set CONFIG_CRYPTO_LRW=m @@ -20796,7 +32041,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_SHA256=y -@@ -6864,6 +8345,10 @@ CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +@@ -6864,6 +8459,10 @@ CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y CONFIG_CRYPTO_HASH_INFO=y CONFIG_CRYPTO_HW=y @@ -20807,7 +32052,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set # CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -@@ -6871,6 +8356,7 @@ CONFIG_CRYPTO_HW=y +@@ -6871,6 +8470,7 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set # CONFIG_CRYPTO_DEV_QAT_4XXX is not set @@ -20815,7 +32060,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set -@@ -6933,16 +8419,16 @@ CONFIG_GENERIC_PCI_IOMAP=y +@@ -6933,16 +8533,16 @@ CONFIG_GENERIC_PCI_IOMAP=y # CONFIG_CRYPTO_LIB_UTILS=y CONFIG_CRYPTO_LIB_AES=y @@ -20837,7 +32082,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_CRYPTO_LIB_POLY1305 is not set # CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set CONFIG_CRYPTO_LIB_SHA1=y -@@ -7013,6 +8499,7 @@ CONFIG_HAS_IOPORT_MAP=y +@@ -7013,6 +8613,7 @@ CONFIG_HAS_IOPORT_MAP=y CONFIG_HAS_DMA=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_ARCH_DMA_ADDR_T_64BIT=y @@ -20845,7 +32090,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DMA_DECLARE_COHERENT=y CONFIG_ARCH_HAS_SETUP_DMA_OPS=y CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -@@ -7032,7 +8519,7 @@ CONFIG_DMA_CMA=y +@@ -7032,7 +8633,7 @@ CONFIG_DMA_CMA=y # # Default contiguous memory area size: # @@ -20854,7 +32099,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_CMA_SIZE_SEL_MBYTES=y # CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set # CONFIG_CMA_SIZE_SEL_MIN is not set -@@ -7043,7 +8530,6 @@ CONFIG_DMA_MAP_BENCHMARK=y +@@ -7043,7 +8644,6 @@ CONFIG_DMA_MAP_BENCHMARK=y CONFIG_SGL_ALLOC=y CONFIG_CHECK_SIGNATURE=y # CONFIG_CPUMASK_OFFSTACK is not set @@ -20862,15 +32107,7 @@ index 
61f2b2f12589..c39bbc3701b3 100644 CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y -@@ -7096,7 +8582,6 @@ CONFIG_BOOT_PRINTK_DELAY=y - CONFIG_DYNAMIC_DEBUG=y - CONFIG_DYNAMIC_DEBUG_CORE=y - CONFIG_SYMBOLIC_ERRNAME=y --CONFIG_DEBUG_BUGVERBOSE=y - # end of printk and dmesg options - - CONFIG_DEBUG_KERNEL=y -@@ -7113,6 +8598,7 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +@@ -7113,6 +8713,7 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y # CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set @@ -20878,7 +32115,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_DEBUG_INFO_BTF=y CONFIG_PAHOLE_HAS_SPLIT_BTF=y CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y -@@ -7179,7 +8665,6 @@ CONFIG_SLUB_DEBUG=y +@@ -7179,7 +8780,6 @@ CONFIG_SLUB_DEBUG=y # CONFIG_PAGE_TABLE_CHECK is not set # CONFIG_PAGE_POISONING is not set # CONFIG_DEBUG_PAGE_REF is not set @@ -20886,15 +32123,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_ARCH_HAS_DEBUG_WX=y # CONFIG_DEBUG_WX is not set CONFIG_GENERIC_PTDUMP=y -@@ -7242,6 +8727,7 @@ CONFIG_SCHEDSTATS=y - # end of Scheduler Debugging - - # CONFIG_DEBUG_TIMEKEEPING is not set -+# CONFIG_DEBUG_PREEMPT is not set - - # - # Lock Debugging (spinlocks, mutexes, etc...) -@@ -7257,7 +8743,7 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y +@@ -7257,7 +8857,7 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y # CONFIG_DEBUG_LOCK_ALLOC is not set CONFIG_DEBUG_ATOMIC_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set @@ -20903,7 +32132,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_WW_MUTEX_SELFTEST is not set # CONFIG_SCF_TORTURE_TEST is not set # CONFIG_CSD_LOCK_WAIT_DEBUG is not set -@@ -7281,8 +8767,9 @@ CONFIG_DEBUG_LIST=y +@@ -7281,8 +8881,9 @@ CONFIG_DEBUG_LIST=y # # RCU Debugging # @@ -20914,54 +32143,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 # CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -@@ -7298,7 +8785,6 @@ CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 - CONFIG_NOP_TRACER=y - CONFIG_HAVE_RETHOOK=y - CONFIG_RETHOOK=y --CONFIG_HAVE_FUNCTION_TRACER=y - CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y - CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y - CONFIG_HAVE_DYNAMIC_FTRACE=y -@@ -7315,15 +8801,8 @@ CONFIG_GENERIC_TRACER=y - CONFIG_TRACING_SUPPORT=y - CONFIG_FTRACE=y - CONFIG_BOOTTIME_TRACING=y --CONFIG_FUNCTION_TRACER=y --CONFIG_FUNCTION_GRAPH_TRACER=y --# CONFIG_FUNCTION_GRAPH_RETVAL is not set --CONFIG_DYNAMIC_FTRACE=y --CONFIG_DYNAMIC_FTRACE_WITH_REGS=y --# CONFIG_FPROBE is not set --CONFIG_FUNCTION_PROFILER=y --CONFIG_STACK_TRACER=y - # CONFIG_IRQSOFF_TRACER is not set -+# CONFIG_PREEMPT_TRACER is not set - CONFIG_SCHED_TRACER=y - CONFIG_HWLAT_TRACER=y - CONFIG_OSNOISE_TRACER=y -@@ -7336,21 +8815,17 @@ CONFIG_BRANCH_PROFILE_NONE=y - CONFIG_BLK_DEV_IO_TRACE=y - CONFIG_PROBE_EVENTS_BTF_ARGS=y - CONFIG_KPROBE_EVENTS=y --# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set - CONFIG_UPROBE_EVENTS=y - CONFIG_BPF_EVENTS=y - CONFIG_DYNAMIC_EVENTS=y - CONFIG_PROBE_EVENTS=y - # CONFIG_BPF_KPROBE_OVERRIDE is not set --CONFIG_FTRACE_MCOUNT_RECORD=y --CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT=y - CONFIG_SYNTH_EVENTS=y - # CONFIG_USER_EVENTS is not set - # CONFIG_TRACE_EVENT_INJECT is not set - # CONFIG_TRACEPOINT_BENCHMARK is not set - CONFIG_RING_BUFFER_BENCHMARK=m - # CONFIG_TRACE_EVAL_MAP_FILE is not set --# CONFIG_FTRACE_RECORD_RECURSION is not set - # CONFIG_FTRACE_STARTUP_TEST is not set - # CONFIG_RING_BUFFER_STARTUP_TEST is not set - # CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set -@@ -7358,7 +8833,37 @@ 
CONFIG_RING_BUFFER_BENCHMARK=m +@@ -7358,7 +8959,38 @@ CONFIG_RING_BUFFER_BENCHMARK=m # CONFIG_SYNTH_EVENT_GEN_TEST is not set # CONFIG_KPROBE_EVENT_GEN_TEST is not set # CONFIG_RV is not set @@ -20971,6 +32153,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 +# CONFIG_SAMPLE_TRACE_EVENTS is not set +# CONFIG_SAMPLE_TRACE_CUSTOM_EVENTS is not set +# CONFIG_SAMPLE_TRACE_PRINTK is not set ++# CONFIG_SAMPLE_FTRACE_OPS is not set +# CONFIG_SAMPLE_TRACE_ARRAY is not set +# CONFIG_SAMPLE_KOBJECT is not set +# CONFIG_SAMPLE_KPROBES is not set @@ -21000,7 +32183,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_STRICT_DEVMEM=y CONFIG_IO_STRICT_DEVMEM=y -@@ -7376,7 +8881,47 @@ CONFIG_FUNCTION_ERROR_INJECTION=y +@@ -7376,7 +9008,47 @@ CONFIG_FUNCTION_ERROR_INJECTION=y # CONFIG_FAULT_INJECTION is not set CONFIG_ARCH_HAS_KCOV=y # CONFIG_KCOV is not set @@ -21049,7 +32232,7 @@ index 61f2b2f12589..c39bbc3701b3 100644 CONFIG_ARCH_USE_MEMTEST=y # CONFIG_MEMTEST is not set # end of Kernel Testing and Coverage -@@ -7388,9 +8933,3 @@ CONFIG_ARCH_USE_MEMTEST=y +@@ -7388,9 +9060,3 @@ CONFIG_ARCH_USE_MEMTEST=y # end of Kernel hacking # CONFIG_KWORKER_NUMA_AFFINITY is not set @@ -21550,22 +32733,436 @@ index 000000000000..10d67d6ff963 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_V4L_MEM2MEM_DRIVERS=y +diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c +index 17a904869724..fc1a34faa5f3 100644 +--- a/arch/riscv/errata/andes/errata.c ++++ b/arch/riscv/errata/andes/errata.c +@@ -17,10 +17,11 @@ + #include + #include + #include ++#include + +-#define ANDESTECH_AX45MP_MARCHID 0x8000000000008a45UL +-#define ANDESTECH_AX45MP_MIMPID 0x500UL +-#define ANDESTECH_SBI_EXT_ANDES 0x0900031E ++#define ANDES_AX45MP_MARCHID 0x8000000000008a45UL ++#define ANDES_AX45MP_MIMPID 0x500UL ++#define ANDES_SBI_EXT_ANDES 0x0900031E + + #define ANDES_SBI_EXT_IOCP_SW_WORKAROUND 1 + +@@ -32,7 +33,7 @@ static long ax45mp_iocp_sw_workaround(void) + * ANDES_SBI_EXT_IOCP_SW_WORKAROUND SBI EXT checks if the IOCP is missing and + * cache is controllable only then CMO will be applied to the platform. + */ +- ret = sbi_ecall(ANDESTECH_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND, ++ ret = sbi_ecall(ANDES_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND, + 0, 0, 0, 0, 0, 0); + + return ret.error ? 
0 : ret.value; +@@ -50,7 +51,7 @@ static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigne + + done = true; + +- if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID) ++ if (arch_id != ANDES_AX45MP_MARCHID || impid != ANDES_AX45MP_MIMPID) + return; + + if (!ax45mp_iocp_sw_workaround()) +@@ -65,6 +66,8 @@ void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct al + unsigned long archid, unsigned long impid, + unsigned int stage) + { ++ BUILD_BUG_ON(ERRATA_ANDES_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + if (stage == RISCV_ALTERNATIVES_BOOT) + errata_probe_iocp(stage, archid, impid); + +diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c +index 3d9a32d791f7..b68b023115c2 100644 +--- a/arch/riscv/errata/sifive/errata.c ++++ b/arch/riscv/errata/sifive/errata.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + struct errata_info_t { + char name[32]; +@@ -91,6 +92,8 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + u32 cpu_apply_errata = 0; + u32 tmp; + ++ BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return; + +diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c +index 0554ed4bf087..928d8f7fe288 100644 +--- a/arch/riscv/errata/thead/errata.c ++++ b/arch/riscv/errata/thead/errata.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + + static bool errata_probe_pbmt(unsigned int stage, + unsigned long arch_id, unsigned long impid) +@@ -95,6 +96,8 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + u32 tmp; + void *oldptr, *altptr; + ++ BUILD_BUG_ON(ERRATA_THEAD_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + for (alt = begin; alt < end; alt++) { + if (alt->vendor_id != THEAD_VENDOR_ID) + continue; +diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h +index d5604d2073bc..e0a1f84404f3 100644 +--- a/arch/riscv/include/asm/acpi.h ++++ b/arch/riscv/include/asm/acpi.h +@@ -61,11 +61,16 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { } + + void acpi_init_rintc_map(void); + struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu); +-u32 get_acpi_id_for_cpu(int cpu); ++static inline u32 get_acpi_id_for_cpu(int cpu) ++{ ++ return acpi_cpu_get_madt_rintc(cpu)->uid; ++} ++ + int acpi_get_riscv_isa(struct acpi_table_header *table, + unsigned int cpu, const char **isa); + +-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } ++void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, ++ u32 *cboz_size, u32 *cbop_size); + #else + static inline void acpi_init_rintc_map(void) { } + static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) +@@ -79,6 +84,18 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table, + return -EINVAL; + } + ++static inline void acpi_get_cbo_block_size(struct acpi_table_header *table, ++ u32 *cbom_size, u32 *cboz_size, ++ u32 *cbop_size) { } ++ + #endif /* CONFIG_ACPI */ + ++#ifdef CONFIG_ACPI_NUMA ++int acpi_numa_get_nid(unsigned int cpu); ++void acpi_map_cpus_to_nodes(void); ++#else ++static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } ++static inline void acpi_map_cpus_to_nodes(void) { } ++#endif /* CONFIG_ACPI_NUMA */ ++ + #endif /*_ASM_ACPI_H*/ +diff --git a/arch/riscv/include/asm/arch_hweight.h 
b/arch/riscv/include/asm/arch_hweight.h +new file mode 100644 +index 000000000000..85b2c443823e +--- /dev/null ++++ b/arch/riscv/include/asm/arch_hweight.h +@@ -0,0 +1,78 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Based on arch/x86/include/asm/arch_hweight.h ++ */ ++ ++#ifndef _ASM_RISCV_HWEIGHT_H ++#define _ASM_RISCV_HWEIGHT_H ++ ++#include ++#include ++ ++#if (BITS_PER_LONG == 64) ++#define CPOPW "cpopw " ++#elif (BITS_PER_LONG == 32) ++#define CPOPW "cpop " ++#else ++#error "Unexpected BITS_PER_LONG" ++#endif ++ ++static __always_inline unsigned int __arch_hweight32(unsigned int w) ++{ ++#ifdef CONFIG_RISCV_ISA_ZBB ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm (".option push\n" ++ ".option arch,+zbb\n" ++ CPOPW "%0, %0\n" ++ ".option pop\n" ++ : "+r" (w) : :); ++ ++ return w; ++ ++legacy: ++#endif ++ return __sw_hweight32(w); ++} ++ ++static inline unsigned int __arch_hweight16(unsigned int w) ++{ ++ return __arch_hweight32(w & 0xffff); ++} ++ ++static inline unsigned int __arch_hweight8(unsigned int w) ++{ ++ return __arch_hweight32(w & 0xff); ++} ++ ++#if BITS_PER_LONG == 64 ++static __always_inline unsigned long __arch_hweight64(__u64 w) ++{ ++# ifdef CONFIG_RISCV_ISA_ZBB ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm (".option push\n" ++ ".option arch,+zbb\n" ++ "cpop %0, %0\n" ++ ".option pop\n" ++ : "+r" (w) : :); ++ ++ return w; ++ ++legacy: ++# endif ++ return __sw_hweight64(w); ++} ++#else /* BITS_PER_LONG == 64 */ ++static inline unsigned long __arch_hweight64(__u64 w) ++{ ++ return __arch_hweight32((u32)w) + ++ __arch_hweight32((u32)(w >> 32)); ++} ++#endif /* !(BITS_PER_LONG == 64) */ ++ ++#endif /* _ASM_RISCV_HWEIGHT_H */ +diff --git a/arch/riscv/include/asm/archrandom.h b/arch/riscv/include/asm/archrandom.h +new file mode 100644 +index 000000000000..5345360adfb9 +--- /dev/null ++++ b/arch/riscv/include/asm/archrandom.h +@@ -0,0 +1,72 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Kernel interface for the RISCV arch_random_* functions ++ * ++ * Copyright (c) 2023 Rivos Inc. ++ * ++ */ ++ ++#ifndef ASM_RISCV_ARCHRANDOM_H ++#define ASM_RISCV_ARCHRANDOM_H ++ ++#include ++#include ++ ++#define SEED_RETRY_LOOPS 100 ++ ++static inline bool __must_check csr_seed_long(unsigned long *v) ++{ ++ unsigned int retry = SEED_RETRY_LOOPS, valid_seeds = 0; ++ const int needed_seeds = sizeof(long) / sizeof(u16); ++ u16 *entropy = (u16 *)v; ++ ++ do { ++ /* ++ * The SEED CSR must be accessed with a read-write instruction. ++ */ ++ unsigned long csr_seed = csr_swap(CSR_SEED, 0); ++ unsigned long opst = csr_seed & SEED_OPST_MASK; ++ ++ switch (opst) { ++ case SEED_OPST_ES16: ++ entropy[valid_seeds++] = csr_seed & SEED_ENTROPY_MASK; ++ if (valid_seeds == needed_seeds) ++ return true; ++ break; ++ ++ case SEED_OPST_DEAD: ++ pr_err_once("archrandom: Unrecoverable error\n"); ++ return false; ++ ++ case SEED_OPST_BIST: ++ case SEED_OPST_WAIT: ++ default: ++ cpu_relax(); ++ continue; ++ } ++ } while (--retry); ++ ++ return false; ++} ++ ++static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) ++{ ++ return 0; ++} ++ ++static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) ++{ ++ if (!max_longs) ++ return 0; ++ ++ /* ++ * If Zkr is supported and csr_seed_long succeeds, we return one long ++ * worth of entropy. 
++ */ ++ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZKR) && csr_seed_long(v)) ++ return 1; ++ ++ return 0; ++} ++ ++#endif /* ASM_RISCV_ARCHRANDOM_H */ +diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h +index f5dfef6c2153..0e0522e588ca 100644 +--- a/arch/riscv/include/asm/atomic.h ++++ b/arch/riscv/include/asm/atomic.h +@@ -17,7 +17,6 @@ + #endif + + #include +-#include + + #define __atomic_acquire_fence() \ + __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") +@@ -207,7 +206,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int + " add %[rc], %[p], %[a]\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) +@@ -228,7 +227,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, + " add %[rc], %[p], %[a]\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) +@@ -248,7 +247,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) + " addi %[rc], %[p], 1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -268,7 +267,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v) + " addi %[rc], %[p], -1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -288,7 +287,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v) + " bltz %[rc], 1f\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -310,7 +309,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v) + " addi %[rc], %[p], 1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -331,7 +330,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v) + " addi %[rc], %[p], -1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -352,7 +351,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) + " bltz %[rc], 1f\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h -index 110752594228..2b1f98b7e9bf 100644 +index 110752594228..feebe8e02ae0 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h -@@ -29,12 +29,22 @@ +@@ -11,13 +11,13 @@ + #define _ASM_RISCV_BARRIER_H + + #ifndef __ASSEMBLY__ ++#include ++#include + + #define nop() __asm__ __volatile__ ("nop") + #define __nops(n) ".rept " #n "\nnop\n.endr\n" + #define nops(n) __asm__ __volatile__ (__nops(n)) + +-#define RISCV_FENCE(p, s) \ +- __asm__ __volatile__ ("fence " #p "," #s : : : "memory") + + /* These barriers need to enforce ordering on both devices 
or memory. */ + #define mb() RISCV_FENCE(iorw,iorw) +@@ -29,21 +29,6 @@ #define __smp_rmb() RISCV_FENCE(r,r) #define __smp_wmb() RISCV_FENCE(w,w) +-#define __smp_store_release(p, v) \ +-do { \ +- compiletime_assert_atomic_type(*p); \ +- RISCV_FENCE(rw,w); \ +- WRITE_ONCE(*p, v); \ +-} while (0) +- +-#define __smp_load_acquire(p) \ +-({ \ +- typeof(*p) ___p1 = READ_ONCE(*p); \ +- compiletime_assert_atomic_type(*p); \ +- RISCV_FENCE(r,rw); \ +- ___p1; \ +-}) +- + /* + * This is a very specific barrier: it's currently only used in two places in + * the kernel, both in the scheduler. See include/linux/spinlock.h for the two +@@ -71,6 +56,45 @@ do { \ + */ + #define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) + +#ifdef CONFIG_ARCH_SOPHGO - #define __smp_store_release(p, v) \ - do { \ - compiletime_assert_atomic_type(*p); \ - RISCV_FENCE(rw,w); \ - WRITE_ONCE(*p, v); \ ++#define __smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ RISCV_FENCE(rw,w); \ ++ WRITE_ONCE(*p, v); \ + RISCV_FENCE(w,rw); \ - } while (0) ++} while (0) +#else +#define __smp_store_release(p, v) \ +do { \ @@ -21574,32 +33171,1156 @@ index 110752594228..2b1f98b7e9bf 100644 + WRITE_ONCE(*p, v); \ +} while (0) +#endif - - #define __smp_load_acquire(p) \ - ({ \ -@@ -44,6 +54,18 @@ do { \ - ___p1; \ - }) - -+#define smp_cond_load_acquire(ptr, cond_expr) ({ \ ++ ++#define __smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = READ_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ RISCV_FENCE(r, rw); \ ++ ___p1; \ ++}) ++ ++#ifdef CONFIG_RISCV_ISA_ZAWRS ++#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ + typeof(ptr) __PTR = (ptr); \ + __unqual_scalar_typeof(*ptr) VAL; \ + for (;;) { \ -+ VAL = __smp_load_acquire(__PTR); \ ++ VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ -+ cpu_relax(); \ ++ __cmpwait_relaxed(ptr, VAL); \ + } \ + (typeof(*ptr))VAL; \ +}) ++#endif ++ + #include + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h +index 3540b690944b..3cdcc2bbaaf5 100644 +--- a/arch/riscv/include/asm/bitops.h ++++ b/arch/riscv/include/asm/bitops.h +@@ -15,15 +15,265 @@ + #include + #include + ++#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) + #include +-#include +-#include + #include ++#include ++#include ++ ++#else ++#include ++#include ++ ++#if (BITS_PER_LONG == 64) ++#define CTZW "ctzw " ++#define CLZW "clzw " ++#elif (BITS_PER_LONG == 32) ++#define CTZW "ctz " ++#define CLZW "clz " ++#else ++#error "Unexpected BITS_PER_LONG" ++#endif ++ ++static __always_inline unsigned long variable__ffs(unsigned long word) ++{ ++ int num; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ "ctz %0, %1\n" ++ ".option pop\n" ++ : "=r" (word) : "r" (word) :); ++ ++ return word; ++ ++legacy: ++ num = 0; ++#if BITS_PER_LONG == 64 ++ if ((word & 0xffffffff) == 0) { ++ num += 32; ++ word >>= 32; ++ } ++#endif ++ if ((word & 0xffff) == 0) { ++ num += 16; ++ word >>= 16; ++ } ++ if ((word & 0xff) == 0) { ++ num += 8; ++ word >>= 8; ++ } ++ if ((word & 0xf) == 0) { ++ num += 4; ++ word >>= 4; ++ } ++ if ((word & 0x3) == 0) { ++ num += 2; ++ word >>= 2; ++ } ++ if ((word & 0x1) == 0) ++ num += 1; ++ return num; ++} ++ ++/** ++ * __ffs - find first set bit in a long word ++ * @word: The word to search ++ * ++ * Undefined if no set bit exists, so code should check against 0 first. 
++ */ ++#define __ffs(word) \ ++ (__builtin_constant_p(word) ? \ ++ (unsigned long)__builtin_ctzl(word) : \ ++ variable__ffs(word)) ++ ++static __always_inline unsigned long variable__fls(unsigned long word) ++{ ++ int num; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ "clz %0, %1\n" ++ ".option pop\n" ++ : "=r" (word) : "r" (word) :); ++ ++ return BITS_PER_LONG - 1 - word; ++ ++legacy: ++ num = BITS_PER_LONG - 1; ++#if BITS_PER_LONG == 64 ++ if (!(word & (~0ul << 32))) { ++ num -= 32; ++ word <<= 32; ++ } ++#endif ++ if (!(word & (~0ul << (BITS_PER_LONG - 16)))) { ++ num -= 16; ++ word <<= 16; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 8)))) { ++ num -= 8; ++ word <<= 8; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 4)))) { ++ num -= 4; ++ word <<= 4; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 2)))) { ++ num -= 2; ++ word <<= 2; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 1)))) ++ num -= 1; ++ return num; ++} ++ ++/** ++ * __fls - find last set bit in a long word ++ * @word: the word to search ++ * ++ * Undefined if no set bit exists, so code should check against 0 first. ++ */ ++#define __fls(word) \ ++ (__builtin_constant_p(word) ? \ ++ (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \ ++ variable__fls(word)) ++ ++static __always_inline int variable_ffs(int x) ++{ ++ int r; ++ ++ if (!x) ++ return 0; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ CTZW "%0, %1\n" ++ ".option pop\n" ++ : "=r" (r) : "r" (x) :); ++ ++ return r + 1; ++ ++legacy: ++ r = 1; ++ if (!(x & 0xffff)) { ++ x >>= 16; ++ r += 16; ++ } ++ if (!(x & 0xff)) { ++ x >>= 8; ++ r += 8; ++ } ++ if (!(x & 0xf)) { ++ x >>= 4; ++ r += 4; ++ } ++ if (!(x & 3)) { ++ x >>= 2; ++ r += 2; ++ } ++ if (!(x & 1)) { ++ x >>= 1; ++ r += 1; ++ } ++ return r; ++} ++ ++/** ++ * ffs - find first set bit in a word ++ * @x: the word to search ++ * ++ * This is defined the same way as the libc and compiler builtin ffs routines. ++ * ++ * ffs(value) returns 0 if value is 0 or the position of the first set bit if ++ * value is nonzero. The first (least significant) bit is at position 1. ++ */ ++#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x)) ++ ++static __always_inline int variable_fls(unsigned int x) ++{ ++ int r; ++ ++ if (!x) ++ return 0; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ CLZW "%0, %1\n" ++ ".option pop\n" ++ : "=r" (r) : "r" (x) :); ++ ++ return 32 - r; ++ ++legacy: ++ r = 32; ++ if (!(x & 0xffff0000u)) { ++ x <<= 16; ++ r -= 16; ++ } ++ if (!(x & 0xff000000u)) { ++ x <<= 8; ++ r -= 8; ++ } ++ if (!(x & 0xf0000000u)) { ++ x <<= 4; ++ r -= 4; ++ } ++ if (!(x & 0xc0000000u)) { ++ x <<= 2; ++ r -= 2; ++ } ++ if (!(x & 0x80000000u)) { ++ x <<= 1; ++ r -= 1; ++ } ++ return r; ++} ++ ++/** ++ * fls - find last set bit in a word ++ * @x: the word to search ++ * ++ * This is defined in a similar way as ffs, but returns the position of the most ++ * significant set bit. ++ * ++ * fls(value) returns 0 if value is 0 or the position of the last set bit if ++ * value is nonzero. The last (most significant) bit is at position 32. ++ */ ++#define fls(x) \ ++({ \ ++ typeof(x) x_ = (x); \ ++ __builtin_constant_p(x_) ? \ ++ (int)((x_ != 0) ? 
(32 - __builtin_clz(x_)) : 0) \ ++ : \ ++ variable_fls(x_); \ ++}) ++ ++#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */ ++ ++#include + #include + #include +-#include + +-#include ++#include ++ ++#include + + #if (BITS_PER_LONG == 64) + #define __AMO(op) "amo" #op ".d" +diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h +index 2f4726d3cfcc..1f4cd12e4664 100644 +--- a/arch/riscv/include/asm/cmpxchg.h ++++ b/arch/riscv/include/asm/cmpxchg.h +@@ -8,143 +8,87 @@ + + #include + +-#include ++#include + #include ++#include ++#include ++#include + +-#define __xchg_relaxed(ptr, new, size) \ ++#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ + ({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ ++ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ ++ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ ++ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ ++ << __s; \ ++ ulong __newx = (ulong)(n) << __s; \ ++ ulong __retx; \ ++ ulong __rc; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ "0: lr.w %0, %2\n" \ ++ " and %1, %0, %z4\n" \ ++ " or %1, %1, %z3\n" \ ++ " sc.w" sc_sfx " %1, %1, %2\n" \ ++ " bnez %1, 0b\n" \ ++ append \ ++ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ ++ : "rJ" (__newx), "rJ" (~__mask) \ ++ : "memory"); \ ++ \ ++ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + }) + +-#define arch_xchg_relaxed(ptr, x) \ ++#define __arch_xchg(sfx, prepend, append, r, p, n) \ + ({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_relaxed((ptr), \ +- _x_, sizeof(*(ptr))); \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ " amoswap" sfx " %0, %2, %1\n" \ ++ append \ ++ : "=r" (r), "+A" (*(p)) \ ++ : "r" (n) \ ++ : "memory"); \ + }) + +-#define __xchg_acquire(ptr, new, size) \ ++#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \ ++ sc_append, swap_append) \ + ({ \ + __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ ++ __typeof__(*(__ptr)) __new = (new); \ ++ __typeof__(*(__ptr)) __ret; \ ++ \ ++ switch (sizeof(*__ptr)) { \ ++ case 1: \ ++ case 2: \ ++ __arch_xchg_masked(sc_sfx, prepend, sc_append, \ ++ __ret, __ptr, __new); \ ++ break; \ + case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w %0, %2, %1\n" \ +- RISCV_ACQUIRE_BARRIER \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ ++ __arch_xchg(".w" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d %0, %2, %1\n" \ +- RISCV_ACQUIRE_BARRIER \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ ++ __arch_xchg(".d" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +- __ret; \ ++ (__typeof__(*(__ptr)))__ret; \ + }) + +-#define arch_xchg_acquire(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_acquire((ptr), \ +- _x_, sizeof(*(ptr))); \ +-}) ++#define arch_xchg_relaxed(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", "", "") + 
+-#define __xchg_release(ptr, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- " amoswap.w %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- " amoswap.d %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) ++#define arch_xchg_acquire(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", \ ++ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER) + + #define arch_xchg_release(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_release((ptr), \ +- _x_, sizeof(*(ptr))); \ +-}) +- +-#define __arch_xchg(ptr, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w.aqrl %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d.aqrl %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) ++ _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "") + + #define arch_xchg(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \ +-}) ++ _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "") + + #define xchg32(ptr, x) \ + ({ \ +@@ -163,190 +107,128 @@ + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. 
+ */ +-#define __cmpxchg_relaxed(ptr, old, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) + +-#define arch_cmpxchg_relaxed(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ ++#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ ++({ \ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ ++ IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ ++ r = o; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ " amocas" cas_sfx " %0, %z2, %1\n" \ ++ append \ ++ : "+&r" (r), "+A" (*(p)) \ ++ : "rJ" (n) \ ++ : "memory"); \ ++ } else { \ ++ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ ++ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ ++ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ ++ << __s; \ ++ ulong __newx = (ulong)(n) << __s; \ ++ ulong __oldx = (ulong)(o) << __s; \ ++ ulong __retx; \ ++ ulong __rc; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ "0: lr.w %0, %2\n" \ ++ " and %1, %0, %z5\n" \ ++ " bne %1, %z3, 1f\n" \ ++ " and %1, %0, %z6\n" \ ++ " or %1, %1, %z4\n" \ ++ " sc.w" sc_sfx " %1, %1, %2\n" \ ++ " bnez %1, 0b\n" \ ++ append \ ++ "1:\n" \ ++ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ ++ : "rJ" ((long)__oldx), "rJ" (__newx), \ ++ "rJ" (__mask), "rJ" (~__mask) \ ++ : "memory"); \ ++ \ ++ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ ++ } \ + }) + +-#define __cmpxchg_acquire(ptr, old, new, size) \ ++#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ + ({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ ++ r = o; \ ++ \ + __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- RISCV_ACQUIRE_BARRIER \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ ++ prepend \ ++ " amocas" sc_cas_sfx " %0, %z2, %1\n" \ ++ append \ ++ : "+&r" (r), "+A" (*(p)) \ ++ : "rJ" (n) \ + : "memory"); \ +- break; \ +- case 8: \ ++ } else { \ ++ register unsigned int __rc; \ ++ \ + __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ ++ prepend \ ++ "0: lr" lr_sfx " %0, %2\n" \ ++ " bne %0, %z3, 1f\n" \ ++ " sc" sc_cas_sfx " %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ +- RISCV_ACQUIRE_BARRIER \ ++ append \ + "1:\n" \ 
+- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ ++ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ ++ : "rJ" (co o), "rJ" (n) \ + : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ + } \ +- __ret; \ + }) + +-#define arch_cmpxchg_acquire(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) +- +-#define __cmpxchg_release(ptr, old, new, size) \ ++#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ + ({ \ + __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ ++ __typeof__(*(__ptr)) __old = (old); \ ++ __typeof__(*(__ptr)) __new = (new); \ ++ __typeof__(*(__ptr)) __ret; \ ++ \ ++ switch (sizeof(*__ptr)) { \ ++ case 1: \ ++ __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ ++ prepend, append, \ ++ __ret, __ptr, __old, __new); \ + break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ ++ case 2: \ ++ __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ ++ prepend, append, \ ++ __ret, __ptr, __old, __new); \ + break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) +- +-#define arch_cmpxchg_release(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_release((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) +- +-#define __cmpxchg(ptr, old, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ + case 4: \ +- __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w.rl %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- " fence rw, rw\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ ++ __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ ++ __ret, __ptr, (long), __old, __new); \ + break; \ + case 8: \ +- __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d.rl %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- " fence rw, rw\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ ++ __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ ++ __ret, __ptr, /**/, __old, __new); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +- __ret; \ ++ (__typeof__(*(__ptr)))__ret; \ + }) + ++#define arch_cmpxchg_relaxed(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", "", "") ++ ++#define arch_cmpxchg_acquire(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) ++ ++#define arch_cmpxchg_release(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") ++ + #define arch_cmpxchg(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ 
= (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) ++ _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") + + #define arch_cmpxchg_local(ptr, o, n) \ +- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) ++ arch_cmpxchg_relaxed((ptr), (o), (n)) + + #define arch_cmpxchg64(ptr, o, n) \ + ({ \ +@@ -360,4 +242,82 @@ + arch_cmpxchg_relaxed((ptr), (o), (n)); \ + }) + ++#define arch_cmpxchg64_relaxed(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_relaxed((ptr), (o), (n)); \ ++}) ++ ++#define arch_cmpxchg64_acquire(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_acquire((ptr), (o), (n)); \ ++}) ++ ++#define arch_cmpxchg64_release(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_release((ptr), (o), (n)); \ ++}) ++ ++#ifdef CONFIG_RISCV_ISA_ZAWRS ++/* ++ * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to ++ * @val we expect it to still terminate within a "reasonable" amount of time ++ * for an implementation-specific other reason, a pending, locally-enabled ++ * interrupt, or because it has been configured to raise an illegal ++ * instruction exception. ++ */ ++static __always_inline void __cmpwait(volatile void *ptr, ++ unsigned long val, ++ int size) ++{ ++ unsigned long tmp; + ++ asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop", ++ 0, RISCV_ISA_EXT_ZAWRS, 1) ++ : : : : no_zawrs); ++ ++ switch (size) { ++ case 1: ++ fallthrough; ++ case 2: ++ /* RISC-V doesn't have lr instructions on byte and half-word. */ ++ goto no_zawrs; ++ case 4: ++ asm volatile( ++ " lr.w %0, %1\n" ++ " xor %0, %0, %2\n" ++ " bnez %0, 1f\n" ++ ZAWRS_WRS_NTO "\n" ++ "1:" ++ : "=&r" (tmp), "+A" (*(u32 *)ptr) ++ : "r" (val)); ++ break; ++#if __riscv_xlen == 64 ++ case 8: ++ asm volatile( ++ " lr.d %0, %1\n" ++ " xor %0, %0, %2\n" ++ " bnez %0, 1f\n" ++ ZAWRS_WRS_NTO "\n" ++ "1:" ++ : "=&r" (tmp), "+A" (*(u64 *)ptr) ++ : "r" (val)); ++ break; ++#endif ++ default: ++ BUILD_BUG(); ++ } ++ ++ return; ++ ++no_zawrs: ++ asm volatile(RISCV_PAUSE : : : "memory"); ++} ++ ++#define __cmpwait_relaxed(ptr, val) \ ++ __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr))) ++#endif ++ + #endif /* _ASM_RISCV_CMPXCHG_H */ +diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h +index 2ac955b51148..6b79287baecc 100644 +--- a/arch/riscv/include/asm/compat.h ++++ b/arch/riscv/include/asm/compat.h +@@ -9,7 +9,6 @@ + */ + #include + #include +-#include + #include + + static inline int is_compat_task(void) +diff --git a/arch/riscv/include/asm/cpufeature-macros.h b/arch/riscv/include/asm/cpufeature-macros.h +new file mode 100644 +index 000000000000..a8103edbf51f +--- /dev/null ++++ b/arch/riscv/include/asm/cpufeature-macros.h +@@ -0,0 +1,66 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2022-2024 Rivos, Inc ++ */ ++ ++#ifndef _ASM_CPUFEATURE_MACROS_H ++#define _ASM_CPUFEATURE_MACROS_H ++ ++#include ++#include ++ ++#define STANDARD_EXT 0 ++ ++bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); ++#define riscv_isa_extension_available(isa_bitmap, ext) \ ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) ++ ++static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) ++ : ++ : [vendor] "i" (vendor), [ext] "i" (ext) ++ : ++ : l_no); ++ ++ return 
true; ++l_no: ++ return false; ++} ++ ++static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) ++ : ++ : [vendor] "i" (vendor), [ext] "i" (ext) ++ : ++ : l_yes); ++ ++ return false; ++l_yes: ++ return true; ++} ++ ++static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_unlikely(STANDARD_EXT, ext); ++ ++ return __riscv_isa_extension_available(NULL, ext); ++} ++ ++static __always_inline bool riscv_has_extension_likely(const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_likely(STANDARD_EXT, ext); ++ ++ return __riscv_isa_extension_available(NULL, ext); ++} ++ ++#endif /* _ASM_CPUFEATURE_MACROS_H */ +diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h +index d0345bd659c9..c8346dc0bed8 100644 +--- a/arch/riscv/include/asm/cpufeature.h ++++ b/arch/riscv/include/asm/cpufeature.h +@@ -7,7 +7,12 @@ + #define _ASM_CPUFEATURE_H + + #include ++#include ++#include ++#include ++#include + #include ++#include + /* - * This is a very specific barrier: it's currently only used in two places in - * the kernel, both in the scheduler. See include/linux/spinlock.h for the two + * These are probed via a device_initcall(), via either the SBI or directly +@@ -31,5 +36,69 @@ DECLARE_PER_CPU(long, misaligned_access_speed); + extern struct riscv_isainfo hart_isa[NR_CPUS]; + + void check_unaligned_access(int cpu); ++void __init riscv_user_isa_enable(void); ++ ++#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ ++ .name = #_name, \ ++ .property = #_name, \ ++ .id = _id, \ ++ .subset_ext_ids = _subset_exts, \ ++ .subset_ext_size = _subset_exts_size, \ ++ .validate = _validate \ ++} ++ ++#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, NULL) ++ ++#define __RISCV_ISA_EXT_DATA_VALIDATE(_name, _id, _validate) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, _validate) ++ ++/* Used to declare pure "lasso" extension (Zk for instance) */ ++#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \ ++ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \ ++ ARRAY_SIZE(_bundled_exts), NULL) ++ ++/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */ ++#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), NULL) ++#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) ++ ++unsigned long riscv_get_elf_hwcap(void); ++ ++struct riscv_isa_ext_data { ++ const unsigned int id; ++ const char *name; ++ const char *property; ++ const unsigned int *subset_ext_ids; ++ const unsigned int subset_ext_size; ++ int (*validate)(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap); ++}; ++ ++extern const struct riscv_isa_ext_data riscv_isa_ext[]; ++extern const size_t riscv_isa_ext_count; ++extern bool riscv_isa_fallback; ++ ++unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); ++static __always_inline bool 
riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_likely(STANDARD_EXT, ext)) ++ return true; ++ ++ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_unlikely(STANDARD_EXT, ext)) ++ return true; ++ ++ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); ++} + + #endif +diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h +index 777cb8299551..1fd1bc2f220b 100644 +--- a/arch/riscv/include/asm/csr.h ++++ b/arch/riscv/include/asm/csr.h +@@ -194,6 +194,7 @@ + /* xENVCFG flags */ + #define ENVCFG_STCE (_AC(1, ULL) << 63) + #define ENVCFG_PBMTE (_AC(1, ULL) << 62) ++#define ENVCFG_ADUE (_AC(1, ULL) << 61) + #define ENVCFG_CBZE (_AC(1, UL) << 7) + #define ENVCFG_CBCFE (_AC(1, UL) << 6) + #define ENVCFG_CBIE_SHIFT 4 +@@ -275,6 +276,7 @@ + #define CSR_SIE 0x104 + #define CSR_STVEC 0x105 + #define CSR_SCOUNTEREN 0x106 ++#define CSR_SENVCFG 0x10a + #define CSR_SSCRATCH 0x140 + #define CSR_SEPC 0x141 + #define CSR_SCAUSE 0x142 +@@ -393,10 +395,20 @@ + #define CSR_VTYPE 0xc21 + #define CSR_VLENB 0xc22 + ++/* Scalar Crypto Extension - Entropy */ ++#define CSR_SEED 0x015 ++#define SEED_OPST_MASK _AC(0xC0000000, UL) ++#define SEED_OPST_BIST _AC(0x00000000, UL) ++#define SEED_OPST_WAIT _AC(0x40000000, UL) ++#define SEED_OPST_ES16 _AC(0x80000000, UL) ++#define SEED_OPST_DEAD _AC(0xC0000000, UL) ++#define SEED_ENTROPY_MASK _AC(0xFFFF, UL) ++ + #ifdef CONFIG_RISCV_M_MODE + # define CSR_STATUS CSR_MSTATUS + # define CSR_IE CSR_MIE + # define CSR_TVEC CSR_MTVEC ++# define CSR_ENVCFG CSR_MENVCFG + # define CSR_SCRATCH CSR_MSCRATCH + # define CSR_EPC CSR_MEPC + # define CSR_CAUSE CSR_MCAUSE +@@ -421,6 +433,7 @@ + # define CSR_STATUS CSR_SSTATUS + # define CSR_IE CSR_SIE + # define CSR_TVEC CSR_STVEC ++# define CSR_ENVCFG CSR_SENVCFG + # define CSR_SCRATCH CSR_SSCRATCH + # define CSR_EPC CSR_SEPC + # define CSR_CAUSE CSR_SCAUSE +diff --git a/arch/riscv/include/asm/dmi.h b/arch/riscv/include/asm/dmi.h +new file mode 100644 +index 000000000000..ca7cce557ef7 +--- /dev/null ++++ b/arch/riscv/include/asm/dmi.h +@@ -0,0 +1,24 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2024 Intel Corporation ++ * ++ * based on arch/arm64/include/asm/dmi.h ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ */ ++ ++#ifndef __ASM_DMI_H ++#define __ASM_DMI_H ++ ++#include ++#include ++ ++#define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB) ++#define dmi_early_unmap(x, l) memunmap(x) ++#define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB) ++#define dmi_unmap(x) memunmap(x) ++#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) ++ ++#endif +diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h +index b3b2dfbdf945..06c236bfab53 100644 +--- a/arch/riscv/include/asm/elf.h ++++ b/arch/riscv/include/asm/elf.h +@@ -14,7 +14,7 @@ + #include + #include + #include +-#include ++#include + + /* + * These are used to set parameters in the core dumps. 
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h -index d3f3c237adad..c7fd111e3a50 100644 +index d3f3c237adad..5469341b60ce 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h +@@ -12,8 +12,8 @@ + #include + + #ifdef CONFIG_ERRATA_ANDES +-#define ERRATA_ANDESTECH_NO_IOCP 0 +-#define ERRATA_ANDESTECH_NUMBER 1 ++#define ERRATA_ANDES_NO_IOCP 0 ++#define ERRATA_ANDES_NUMBER 1 + #endif + + #ifdef CONFIG_ERRATA_SIFIVE @@ -128,9 +128,12 @@ asm volatile(ALTERNATIVE( \ * 0000000 11001 00000 000 00000 0001011 */ @@ -21614,7 +34335,7 @@ index d3f3c237adad..c7fd111e3a50 100644 #define ALT_CMO_OP(_op, _start, _size, _cachesize) \ asm volatile(ALTERNATIVE_2( \ -@@ -157,6 +160,33 @@ asm volatile(ALTERNATIVE_2( \ +@@ -157,18 +160,36 @@ asm volatile(ALTERNATIVE_2( \ "r"((unsigned long)(_start) + (_size)) \ : "a0") @@ -21648,22 +34369,290 @@ index d3f3c237adad..c7fd111e3a50 100644 #define THEAD_C9XX_RV_IRQ_PMU 17 #define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5 +-#define ALT_SBI_PMU_OVERFLOW(__ovl) \ +-asm volatile(ALTERNATIVE( \ +- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \ +- "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ +- THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ +- CONFIG_ERRATA_THEAD_PMU) \ +- : "=r" (__ovl) : \ +- : "memory") +- + #endif /* __ASSEMBLY__ */ + + #endif +diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h +index 2b443a3a487f..6bcd80325dfc 100644 +--- a/arch/riscv/include/asm/fence.h ++++ b/arch/riscv/include/asm/fence.h +@@ -1,12 +1,18 @@ + #ifndef _ASM_RISCV_FENCE_H + #define _ASM_RISCV_FENCE_H + ++#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n" ++#define RISCV_FENCE(p, s) \ ++ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); }) ++ + #ifdef CONFIG_SMP +-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n" +-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n" ++#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw) ++#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w) ++#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw) + #else + #define RISCV_ACQUIRE_BARRIER + #define RISCV_RELEASE_BARRIER ++#define RISCV_FULL_BARRIER + #endif + + #endif /* _ASM_RISCV_FENCE_H */ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h -index f4157034efa9..5e8a89b38091 100644 +index f4157034efa9..869da082252a 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h -@@ -58,6 +58,7 @@ +@@ -8,25 +8,16 @@ + #ifndef _ASM_RISCV_HWCAP_H + #define _ASM_RISCV_HWCAP_H + +-#include +-#include +-#include + #include + + #define RISCV_ISA_EXT_a ('a' - 'a') +-#define RISCV_ISA_EXT_b ('b' - 'a') + #define RISCV_ISA_EXT_c ('c' - 'a') + #define RISCV_ISA_EXT_d ('d' - 'a') + #define RISCV_ISA_EXT_f ('f' - 'a') + #define RISCV_ISA_EXT_h ('h' - 'a') + #define RISCV_ISA_EXT_i ('i' - 'a') +-#define RISCV_ISA_EXT_j ('j' - 'a') +-#define RISCV_ISA_EXT_k ('k' - 'a') + #define RISCV_ISA_EXT_m ('m' - 'a') +-#define RISCV_ISA_EXT_p ('p' - 'a') + #define RISCV_ISA_EXT_q ('q' - 'a') +-#define RISCV_ISA_EXT_s ('s' - 'a') +-#define RISCV_ISA_EXT_u ('u' - 'a') + #define RISCV_ISA_EXT_v ('v' - 'a') + + /* +@@ -58,85 +49,69 @@ #define RISCV_ISA_EXT_ZICSR 40 #define RISCV_ISA_EXT_ZIFENCEI 41 #define RISCV_ISA_EXT_ZIHPM 42 +- +-#define RISCV_ISA_EXT_MAX 64 +#define RISCV_ISA_EXT_SMSTATEEN 43 ++#define RISCV_ISA_EXT_ZICOND 44 ++#define RISCV_ISA_EXT_ZBC 45 ++#define RISCV_ISA_EXT_ZBKB 46 ++#define RISCV_ISA_EXT_ZBKC 47 ++#define RISCV_ISA_EXT_ZBKX 48 ++#define RISCV_ISA_EXT_ZKND 49 
++#define RISCV_ISA_EXT_ZKNE 50 ++#define RISCV_ISA_EXT_ZKNH 51 ++#define RISCV_ISA_EXT_ZKR 52 ++#define RISCV_ISA_EXT_ZKSED 53 ++#define RISCV_ISA_EXT_ZKSH 54 ++#define RISCV_ISA_EXT_ZKT 55 ++#define RISCV_ISA_EXT_ZVBB 56 ++#define RISCV_ISA_EXT_ZVBC 57 ++#define RISCV_ISA_EXT_ZVKB 58 ++#define RISCV_ISA_EXT_ZVKG 59 ++#define RISCV_ISA_EXT_ZVKNED 60 ++#define RISCV_ISA_EXT_ZVKNHA 61 ++#define RISCV_ISA_EXT_ZVKNHB 62 ++#define RISCV_ISA_EXT_ZVKSED 63 ++#define RISCV_ISA_EXT_ZVKSH 64 ++#define RISCV_ISA_EXT_ZVKT 65 ++#define RISCV_ISA_EXT_ZFH 66 ++#define RISCV_ISA_EXT_ZFHMIN 67 ++#define RISCV_ISA_EXT_ZIHINTNTL 68 ++#define RISCV_ISA_EXT_ZVFH 69 ++#define RISCV_ISA_EXT_ZVFHMIN 70 ++#define RISCV_ISA_EXT_ZFA 71 ++#define RISCV_ISA_EXT_ZTSO 72 ++#define RISCV_ISA_EXT_ZACAS 73 ++#define RISCV_ISA_EXT_ZVE32X 74 ++#define RISCV_ISA_EXT_ZVE32F 75 ++#define RISCV_ISA_EXT_ZVE64X 76 ++#define RISCV_ISA_EXT_ZVE64F 77 ++#define RISCV_ISA_EXT_ZVE64D 78 ++#define RISCV_ISA_EXT_ZIMOP 79 ++#define RISCV_ISA_EXT_ZCA 80 ++#define RISCV_ISA_EXT_ZCB 81 ++#define RISCV_ISA_EXT_ZCD 82 ++#define RISCV_ISA_EXT_ZCF 83 ++#define RISCV_ISA_EXT_ZCMOP 84 ++#define RISCV_ISA_EXT_ZAWRS 85 ++#define RISCV_ISA_EXT_SVVPTC 86 ++#define RISCV_ISA_EXT_SMMPM 87 ++#define RISCV_ISA_EXT_SMNPM 88 ++#define RISCV_ISA_EXT_SSNPM 89 ++#define RISCV_ISA_EXT_ZABHA 90 ++#define RISCV_ISA_EXT_ZICCRSE 91 ++#define RISCV_ISA_EXT_SVADE 92 ++#define RISCV_ISA_EXT_SVADU 93 ++ ++#define RISCV_ISA_EXT_XLINUXENVCFG 127 ++ ++#define RISCV_ISA_EXT_MAX 128 ++#define RISCV_ISA_EXT_INVALID U32_MAX + + #ifdef CONFIG_RISCV_M_MODE + #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA ++#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM + #else + #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA +-#endif +- +-#ifndef __ASSEMBLY__ +- +-#include +- +-unsigned long riscv_get_elf_hwcap(void); +- +-struct riscv_isa_ext_data { +- const unsigned int id; +- const char *name; +- const char *property; +-}; +- +-extern const struct riscv_isa_ext_data riscv_isa_ext[]; +-extern const size_t riscv_isa_ext_count; +-extern bool riscv_isa_fallback; +- +-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); +- +-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) +- +-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); +-#define riscv_isa_extension_available(isa_bitmap, ext) \ +- __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) +- +-static __always_inline bool +-riscv_has_extension_likely(const unsigned long ext) +-{ +- compiletime_assert(ext < RISCV_ISA_EXT_MAX, +- "ext must be < RISCV_ISA_EXT_MAX"); +- +- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { +- asm goto( +- ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) +- : +- : [ext] "i" (ext) +- : +- : l_no); +- } else { +- if (!__riscv_isa_extension_available(NULL, ext)) +- goto l_no; +- } +- +- return true; +-l_no: +- return false; +-} +- +-static __always_inline bool +-riscv_has_extension_unlikely(const unsigned long ext) +-{ +- compiletime_assert(ext < RISCV_ISA_EXT_MAX, +- "ext must be < RISCV_ISA_EXT_MAX"); +- +- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { +- asm goto( +- ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) +- : +- : [ext] "i" (ext) +- : +- : l_yes); +- } else { +- if (__riscv_isa_extension_available(NULL, ext)) +- goto l_yes; +- } +- +- return false; +-l_yes: +- return true; +-} +- ++#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM + #endif + + #endif /* _ASM_RISCV_HWCAP_H */ +diff --git a/arch/riscv/include/asm/hwprobe.h 
b/arch/riscv/include/asm/hwprobe.h +index 7cad513538d8..ef01c182af2b 100644 +--- a/arch/riscv/include/asm/hwprobe.h ++++ b/arch/riscv/include/asm/hwprobe.h +@@ -8,11 +8,35 @@ - #define RISCV_ISA_EXT_MAX 64 + #include + +-#define RISCV_HWPROBE_MAX_KEY 5 ++#define RISCV_HWPROBE_MAX_KEY 8 + + static inline bool riscv_hwprobe_key_is_valid(__s64 key) + { + return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY; + } + ++static inline bool hwprobe_key_is_bitmask(__s64 key) ++{ ++ switch (key) { ++ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: ++ case RISCV_HWPROBE_KEY_IMA_EXT_0: ++ case RISCV_HWPROBE_KEY_CPUPERF_0: ++ return true; ++ } ++ ++ return false; ++} ++ ++static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair, ++ struct riscv_hwprobe *other_pair) ++{ ++ if (pair->key != other_pair->key) ++ return false; ++ ++ if (hwprobe_key_is_bitmask(pair->key)) ++ return (pair->value & other_pair->value) == other_pair->value; ++ ++ return pair->value == other_pair->value; ++} ++ + #endif +diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h +index 6960beb75f32..cbd51bfdf527 100644 +--- a/arch/riscv/include/asm/insn-def.h ++++ b/arch/riscv/include/asm/insn-def.h +@@ -196,4 +196,8 @@ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(4)) ++#define RISCV_PAUSE ".4byte 0x100000f" ++#define ZAWRS_WRS_NTO ".4byte 0x00d00073" ++#define ZAWRS_WRS_STO ".4byte 0x01d00073" ++ + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h -index 42497d487a17..bbdc3c7ed6ca 100644 +index 42497d487a17..8118363494e0 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h +@@ -47,10 +47,10 @@ + * sufficient to ensure this works sanely on controllers that support I/O + * writes. + */ +-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory"); +-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory"); +-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); +-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); ++#define __io_pbr() RISCV_FENCE(io, i) ++#define __io_par(v) RISCV_FENCE(i, ior) ++#define __io_pbw() RISCV_FENCE(iow, o) ++#define __io_paw() RISCV_FENCE(o, io) + + /* + * Accesses from a single hart to a single I/O address must be ordered. 
This @@ -140,4 +140,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) #endif @@ -21673,6 +34662,187 @@ index 42497d487a17..bbdc3c7ed6ca 100644 + ioremap_prot((addr), (size), _PAGE_IOREMAP_WC) + #endif /* _ASM_RISCV_IO_H */ +diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h +index 8e10a94430a2..dba0359f029e 100644 +--- a/arch/riscv/include/asm/irq.h ++++ b/arch/riscv/include/asm/irq.h +@@ -12,8 +12,68 @@ + + #include + ++#define INVALID_CONTEXT UINT_MAX ++ ++#ifdef CONFIG_SMP ++bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu); ++#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace ++#endif ++ + void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)); + + struct fwnode_handle *riscv_get_intc_hwnode(void); + ++#ifdef CONFIG_ACPI ++ ++enum riscv_irqchip_type { ++ ACPI_RISCV_IRQCHIP_INTC = 0x00, ++ ACPI_RISCV_IRQCHIP_IMSIC = 0x01, ++ ACPI_RISCV_IRQCHIP_PLIC = 0x02, ++ ACPI_RISCV_IRQCHIP_APLIC = 0x03, ++}; ++ ++int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs); ++struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi); ++unsigned long acpi_rintc_index_to_hartid(u32 index); ++unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx); ++unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id); ++unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx); ++int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res); ++ ++#else ++static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs) ++{ ++ return 0; ++} ++ ++static inline unsigned long acpi_rintc_index_to_hartid(u32 index) ++{ ++ return INVALID_HARTID; ++} ++ ++static inline unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, ++ unsigned int ctxt_idx) ++{ ++ return INVALID_HARTID; ++} ++ ++static inline unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) ++{ ++ return INVALID_CONTEXT; ++} ++ ++static inline unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ return INVALID_CONTEXT; ++} ++ ++static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) ++{ ++ return 0; ++} ++ ++#endif /* CONFIG_ACPI */ ++ + #endif /* _ASM_RISCV_IRQ_H */ +diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h +deleted file mode 100644 +index 6dd1a4809ec1..000000000000 +--- a/arch/riscv/include/asm/kvm_aia_aplic.h ++++ /dev/null +@@ -1,58 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-/* +- * Copyright (C) 2021 Western Digital Corporation or its affiliates. +- * Copyright (C) 2022 Ventana Micro Systems Inc. 
+- */ +-#ifndef __KVM_RISCV_AIA_IMSIC_H +-#define __KVM_RISCV_AIA_IMSIC_H +- +-#include +- +-#define APLIC_MAX_IDC BIT(14) +-#define APLIC_MAX_SOURCE 1024 +- +-#define APLIC_DOMAINCFG 0x0000 +-#define APLIC_DOMAINCFG_RDONLY 0x80000000 +-#define APLIC_DOMAINCFG_IE BIT(8) +-#define APLIC_DOMAINCFG_DM BIT(2) +-#define APLIC_DOMAINCFG_BE BIT(0) +- +-#define APLIC_SOURCECFG_BASE 0x0004 +-#define APLIC_SOURCECFG_D BIT(10) +-#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff +-#define APLIC_SOURCECFG_SM_MASK 0x00000007 +-#define APLIC_SOURCECFG_SM_INACTIVE 0x0 +-#define APLIC_SOURCECFG_SM_DETACH 0x1 +-#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 +-#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 +-#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 +-#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 +- +-#define APLIC_IRQBITS_PER_REG 32 +- +-#define APLIC_SETIP_BASE 0x1c00 +-#define APLIC_SETIPNUM 0x1cdc +- +-#define APLIC_CLRIP_BASE 0x1d00 +-#define APLIC_CLRIPNUM 0x1ddc +- +-#define APLIC_SETIE_BASE 0x1e00 +-#define APLIC_SETIENUM 0x1edc +- +-#define APLIC_CLRIE_BASE 0x1f00 +-#define APLIC_CLRIENUM 0x1fdc +- +-#define APLIC_SETIPNUM_LE 0x2000 +-#define APLIC_SETIPNUM_BE 0x2004 +- +-#define APLIC_GENMSI 0x3000 +- +-#define APLIC_TARGET_BASE 0x3004 +-#define APLIC_TARGET_HART_IDX_SHIFT 18 +-#define APLIC_TARGET_HART_IDX_MASK 0x3fff +-#define APLIC_TARGET_GUEST_IDX_SHIFT 12 +-#define APLIC_TARGET_GUEST_IDX_MASK 0x3f +-#define APLIC_TARGET_IPRIO_MASK 0xff +-#define APLIC_TARGET_EIID_MASK 0x7ff +- +-#endif +diff --git a/arch/riscv/include/asm/kvm_aia_imsic.h b/arch/riscv/include/asm/kvm_aia_imsic.h +deleted file mode 100644 +index da5881d2bde0..000000000000 +--- a/arch/riscv/include/asm/kvm_aia_imsic.h ++++ /dev/null +@@ -1,38 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-/* +- * Copyright (C) 2021 Western Digital Corporation or its affiliates. +- * Copyright (C) 2022 Ventana Micro Systems Inc. +- */ +-#ifndef __KVM_RISCV_AIA_IMSIC_H +-#define __KVM_RISCV_AIA_IMSIC_H +- +-#include +-#include +- +-#define IMSIC_MMIO_PAGE_SHIFT 12 +-#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT) +-#define IMSIC_MMIO_PAGE_LE 0x00 +-#define IMSIC_MMIO_PAGE_BE 0x04 +- +-#define IMSIC_MIN_ID 63 +-#define IMSIC_MAX_ID 2048 +- +-#define IMSIC_EIDELIVERY 0x70 +- +-#define IMSIC_EITHRESHOLD 0x72 +- +-#define IMSIC_EIP0 0x80 +-#define IMSIC_EIP63 0xbf +-#define IMSIC_EIPx_BITS 32 +- +-#define IMSIC_EIE0 0xc0 +-#define IMSIC_EIE63 0xff +-#define IMSIC_EIEx_BITS 32 +- +-#define IMSIC_FIRST IMSIC_EIDELIVERY +-#define IMSIC_LAST IMSIC_EIE63 +- +-#define IMSIC_MMIO_SETIPNUM_LE 0x00 +-#define IMSIC_MMIO_SETIPNUM_BE 0x04 +- +-#endif diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h index 6c016ebb5020..47b240d0d596 100644 --- a/arch/riscv/include/asm/membarrier.h @@ -21703,6 +34873,42 @@ index 6c016ebb5020..47b240d0d596 100644 * Matches a full barrier in the proximity of the membarrier * system call entry. */ +diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h +index 4c58ee7f95ec..06cadfd7a237 100644 +--- a/arch/riscv/include/asm/mmio.h ++++ b/arch/riscv/include/asm/mmio.h +@@ -12,6 +12,7 @@ + #define _ASM_RISCV_MMIO_H + + #include ++#include + #include + + /* Generic IO read/write. These perform native-endian accesses. */ +@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) + * doesn't define any ordering between the memory space and the I/O space. 
+ */ + #define __io_br() do {} while (0) +-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) +-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) ++#define __io_ar(v) RISCV_FENCE(i, ir) ++#define __io_bw() RISCV_FENCE(w, o) + #define __io_aw() mmiowb_set_pending() + + #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) +diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h +index 0b2333e71fdc..52ce4a399d9b 100644 +--- a/arch/riscv/include/asm/mmiowb.h ++++ b/arch/riscv/include/asm/mmiowb.h +@@ -7,7 +7,7 @@ + * "o,w" is sufficient to ensure that all writes to the device have completed + * before the write to the spinlock is allowed to commit. + */ +-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory"); ++#define mmiowb() RISCV_FENCE(o, w) + + #include + #include diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index d169a4f41a2e..deaf971253a2 100644 --- a/arch/riscv/include/asm/pgalloc.h @@ -21813,10 +35019,18 @@ index 3272ca7a5270..b99bd66107a6 100644 static inline u64 riscv_page_mtmask(void) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h -index e58315cedfd3..d094015802ef 100644 +index e58315cedfd3..e93155c2c200 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h -@@ -205,7 +205,8 @@ extern struct pt_alloc_ops pt_ops __initdata; +@@ -117,6 +117,7 @@ + #include + #include + #include ++#include + + #define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT) + +@@ -205,7 +206,8 @@ extern struct pt_alloc_ops pt_ops __initdata; #define PAGE_TABLE __pgprot(_PAGE_TABLE) @@ -21826,7 +35040,25 @@ index e58315cedfd3..d094015802ef 100644 #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) extern pgd_t swapper_pg_dir[]; -@@ -663,6 +664,12 @@ static inline int pmd_write(pmd_t pmd) +@@ -620,6 +622,17 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) + return __pgprot(prot); + } + ++/* ++ * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By ++ * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in ++ * DT. ++ */ ++#define arch_has_hw_pte_young arch_has_hw_pte_young ++static inline bool arch_has_hw_pte_young(void) ++{ ++ return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU); ++} ++ + /* + * THP functions + */ +@@ -663,6 +676,12 @@ static inline int pmd_write(pmd_t pmd) return pte_write(pmd_pte(pmd)); } @@ -21839,6 +35071,23 @@ index e58315cedfd3..d094015802ef 100644 static inline int pmd_dirty(pmd_t pmd) { return pte_dirty(pmd_pte(pmd)); +diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h +index 4f6af8c6cfa0..0de0e2e29a82 100644 +--- a/arch/riscv/include/asm/processor.h ++++ b/arch/riscv/include/asm/processor.h +@@ -57,6 +57,12 @@ + + #define STACK_TOP DEFAULT_MAP_WINDOW + ++#ifdef CONFIG_MMU ++#define user_max_virt_addr() arch_get_mmap_end(ULONG_MAX, 0, 0) ++#else ++#define user_max_virt_addr() 0 ++#endif /* CONFIG_MMU */ ++ + /* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
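/*
 * Illustrative sketch, not part of the patch: RISCV_FENCE() (added to
 * <asm/fence.h> earlier in this series) only stringises its predecessor and
 * successor sets, so the conversions above are behaviour-preserving, e.g.
 *
 *	__io_bw() -> RISCV_FENCE(w, o) -> asm volatile("\tfence w,o\n" ::: "memory")
 *	mmiowb()  -> RISCV_FENCE(o, w) -> asm volatile("\tfence o,w\n" ::: "memory")
 *
 * The stand-alone helper below merely demonstrates the macro; it is not code
 * from the patch.
 */
static inline void riscv_fence_demo(void)
{
	RISCV_FENCE(w, o);	/* order prior memory writes before device output */
	RISCV_FENCE(o, w);	/* order device output before later memory writes */
}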
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 3ed853b8a8c8..8d261a317175 100644 --- a/arch/riscv/include/asm/sbi.h @@ -21879,10 +35128,40 @@ index 2f901a410586..87ab782be702 100644 #else #define MAX_PHYSMEM_BITS 32 #endif /* CONFIG_64BIT */ +diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h +index 02f87867389a..4ffb022b097f 100644 +--- a/arch/riscv/include/asm/suspend.h ++++ b/arch/riscv/include/asm/suspend.h +@@ -13,7 +13,7 @@ struct suspend_context { + /* Saved and restored by low-level functions */ + struct pt_regs regs; + /* Saved and restored by high-level functions */ +- unsigned long scratch; ++ unsigned long envcfg; + unsigned long tvec; + unsigned long ie; + #ifdef CONFIG_MMU +@@ -55,4 +55,7 @@ int hibernate_resume_nonboot_cpu_disable(void); + asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp, + unsigned long cpu_resume); + asmlinkage int hibernate_core_restore_code(void); ++bool riscv_sbi_hsm_is_supported(void); ++bool riscv_sbi_suspend_state_is_valid(u32 state); ++int riscv_sbi_hart_suspend(u32 state); + #endif diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h -index a727be723c56..1da3f54d52f0 100644 +index a727be723c56..7508f3ec8063 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h +@@ -9,7 +9,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include @@ -63,6 +63,21 @@ static __always_inline bool has_fpu(void) return riscv_has_extension_likely(RISCV_ISA_EXT_f) || riscv_has_extension_likely(RISCV_ISA_EXT_d); @@ -21969,18 +35248,1461 @@ index 50b63b5c15bd..1f6c38420d8e 100644 #define tlb_flush tlb_flush #include +diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h +index 96b65a5396df..8f383f05a290 100644 +--- a/arch/riscv/include/asm/vdso/processor.h ++++ b/arch/riscv/include/asm/vdso/processor.h +@@ -5,6 +5,7 @@ + #ifndef __ASSEMBLY__ + + #include ++#include + + static inline void cpu_relax(void) + { +@@ -14,16 +15,11 @@ static inline void cpu_relax(void) + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); + #endif + +-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE + /* + * Reduce instruction retirement. + * This assumes the PC changes. 
+ */ +- __asm__ __volatile__ ("pause"); +-#else +- /* Encoding of the pause instruction */ +- __asm__ __volatile__ (".4byte 0x100000F"); +-#endif ++ __asm__ __volatile__ (RISCV_PAUSE); + barrier(); + } + +diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h +index c5ee07b3df07..be77ae870829 100644 +--- a/arch/riscv/include/asm/vector.h ++++ b/arch/riscv/include/asm/vector.h +@@ -15,7 +15,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +@@ -25,7 +25,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs); + + static __always_inline bool has_vector(void) + { +- return riscv_has_extension_unlikely(RISCV_ISA_EXT_v); ++ return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X); + } + + static inline void __riscv_v_vstate_clean(struct pt_regs *regs) +@@ -79,7 +79,7 @@ static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src + { + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvl x0, %2, %1\n\t" + ".option pop\n\t" + "csrw " __stringify(CSR_VSTART) ", %0\n\t" +@@ -97,7 +97,7 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to, + __vstate_csr_save(save_to); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vse8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" +@@ -119,7 +119,7 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_ + riscv_v_enable(); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vle8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" +@@ -141,7 +141,7 @@ static inline void __riscv_v_vstate_discard(void) + riscv_v_enable(); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vmv.v.i v0, -1\n\t" + "vmv.v.i v8, -1\n\t" +diff --git a/arch/riscv/include/asm/vendor_extensions.h b/arch/riscv/include/asm/vendor_extensions.h +new file mode 100644 +index 000000000000..0517ce38c5be +--- /dev/null ++++ b/arch/riscv/include/asm/vendor_extensions.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2024 Rivos, Inc ++ */ ++ ++#ifndef _ASM_VENDOR_EXTENSIONS_H ++#define _ASM_VENDOR_EXTENSIONS_H ++ ++#include ++ ++#include ++ ++/* ++ * The extension keys of each vendor must be strictly less than this value. ++ */ ++#define RISCV_ISA_VENDOR_EXT_MAX 32 ++ ++struct riscv_isavendorinfo { ++ DECLARE_BITMAP(isa, RISCV_ISA_VENDOR_EXT_MAX); ++}; ++ ++struct riscv_isa_vendor_ext_data_list { ++ bool is_initialized; ++ const size_t ext_data_count; ++ const struct riscv_isa_ext_data *ext_data; ++ struct riscv_isavendorinfo per_hart_isa_bitmap[NR_CPUS]; ++ struct riscv_isavendorinfo all_harts_isa_bitmap; ++}; ++ ++extern struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[]; ++ ++extern const size_t riscv_isa_vendor_ext_list_size; ++ ++/* ++ * The alternatives need some way of distinguishing between vendor extensions ++ * and errata. Incrementing all of the vendor extension keys so they are at ++ * least 0x8000 accomplishes that. 
++ */ ++#define RISCV_VENDOR_EXT_ALTERNATIVES_BASE 0x8000 ++ ++#define VENDOR_EXT_ALL_CPUS -1 ++ ++bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit); ++#define riscv_cpu_isa_vendor_extension_available(cpu, vendor, ext) \ ++ __riscv_isa_vendor_extension_available(cpu, vendor, RISCV_ISA_VENDOR_EXT_##ext) ++#define riscv_isa_vendor_extension_available(vendor, ext) \ ++ __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, \ ++ RISCV_ISA_VENDOR_EXT_##ext) ++ ++static __always_inline bool riscv_has_vendor_extension_likely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_likely(vendor, ++ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ ++ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); ++} ++ ++static __always_inline bool riscv_has_vendor_extension_unlikely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_unlikely(vendor, ++ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ ++ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_vendor_extension_likely(const unsigned long vendor, ++ int cpu, const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_likely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ return true; ++ ++ return __riscv_isa_vendor_extension_available(cpu, vendor, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_vendor_extension_unlikely(const unsigned long vendor, ++ int cpu, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_unlikely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ return true; ++ ++ return __riscv_isa_vendor_extension_available(cpu, vendor, ext); ++} ++ ++#endif /* _ASM_VENDOR_EXTENSIONS_H */ +diff --git a/arch/riscv/include/asm/vendor_extensions/andes.h b/arch/riscv/include/asm/vendor_extensions/andes.h +new file mode 100644 +index 000000000000..7bb2fc43438f +--- /dev/null ++++ b/arch/riscv/include/asm/vendor_extensions/andes.h +@@ -0,0 +1,19 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H ++#define _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H ++ ++#include ++ ++#include ++ ++#define RISCV_ISA_VENDOR_EXT_XANDESPMU 0 ++ ++/* ++ * Extension keys should be strictly less than max. ++ * It is safe to increment this when necessary. 
++ */ ++#define RISCV_ISA_VENDOR_EXT_MAX_ANDES 32 ++ ++extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes; ++ ++#endif +diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h +index e55407ace0c3..2f2bb0c84f9a 100644 +--- a/arch/riscv/include/asm/vendorid_list.h ++++ b/arch/riscv/include/asm/vendorid_list.h +@@ -5,7 +5,7 @@ + #ifndef ASM_VENDOR_LIST_H + #define ASM_VENDOR_LIST_H + +-#define ANDESTECH_VENDOR_ID 0x31e ++#define ANDES_VENDOR_ID 0x31e + #define SIFIVE_VENDOR_ID 0x489 + #define THEAD_VENDOR_ID 0x5b7 + +diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h +index 006bfb48343d..6fdaefa62e14 100644 +--- a/arch/riscv/include/uapi/asm/hwprobe.h ++++ b/arch/riscv/include/uapi/asm/hwprobe.h +@@ -10,7 +10,7 @@ + + /* + * Interface for probing hardware capabilities from userspace, see +- * Documentation/riscv/hwprobe.rst for more information. ++ * Documentation/arch/riscv/hwprobe.rst for more information. + */ + struct riscv_hwprobe { + __s64 key; +@@ -29,6 +29,50 @@ struct riscv_hwprobe { + #define RISCV_HWPROBE_EXT_ZBA (1 << 3) + #define RISCV_HWPROBE_EXT_ZBB (1 << 4) + #define RISCV_HWPROBE_EXT_ZBS (1 << 5) ++#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6) ++#define RISCV_HWPROBE_EXT_ZBC (1 << 7) ++#define RISCV_HWPROBE_EXT_ZBKB (1 << 8) ++#define RISCV_HWPROBE_EXT_ZBKC (1 << 9) ++#define RISCV_HWPROBE_EXT_ZBKX (1 << 10) ++#define RISCV_HWPROBE_EXT_ZKND (1 << 11) ++#define RISCV_HWPROBE_EXT_ZKNE (1 << 12) ++#define RISCV_HWPROBE_EXT_ZKNH (1 << 13) ++#define RISCV_HWPROBE_EXT_ZKSED (1 << 14) ++#define RISCV_HWPROBE_EXT_ZKSH (1 << 15) ++#define RISCV_HWPROBE_EXT_ZKT (1 << 16) ++#define RISCV_HWPROBE_EXT_ZVBB (1 << 17) ++#define RISCV_HWPROBE_EXT_ZVBC (1 << 18) ++#define RISCV_HWPROBE_EXT_ZVKB (1 << 19) ++#define RISCV_HWPROBE_EXT_ZVKG (1 << 20) ++#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21) ++#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22) ++#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23) ++#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24) ++#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25) ++#define RISCV_HWPROBE_EXT_ZVKT (1 << 26) ++#define RISCV_HWPROBE_EXT_ZFH (1 << 27) ++#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28) ++#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29) ++#define RISCV_HWPROBE_EXT_ZVFH (1 << 30) ++#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31) ++#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32) ++#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) ++#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) ++#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35) ++#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36) ++#define RISCV_HWPROBE_EXT_ZVE32X (1ULL << 37) ++#define RISCV_HWPROBE_EXT_ZVE32F (1ULL << 38) ++#define RISCV_HWPROBE_EXT_ZVE64X (1ULL << 39) ++#define RISCV_HWPROBE_EXT_ZVE64F (1ULL << 40) ++#define RISCV_HWPROBE_EXT_ZVE64D (1ULL << 41) ++#define RISCV_HWPROBE_EXT_ZIMOP (1ULL << 42) ++#define RISCV_HWPROBE_EXT_ZCA (1ULL << 43) ++#define RISCV_HWPROBE_EXT_ZCB (1ULL << 44) ++#define RISCV_HWPROBE_EXT_ZCD (1ULL << 45) ++#define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) ++#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) ++#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) ++#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) + #define RISCV_HWPROBE_KEY_CPUPERF_0 5 + #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) + #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) +@@ -36,6 +80,12 @@ struct riscv_hwprobe { + #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0) + #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0) + #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0) 
++#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 ++#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7 ++#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8 + /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ + ++/* Flags */ ++#define RISCV_HWPROBE_WHICH_CPUS (1 << 0) ++ + #endif +diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile +index a2499fcc1cf3..70d04e1875f0 100644 +--- a/arch/riscv/kernel/Makefile ++++ b/arch/riscv/kernel/Makefile +@@ -52,12 +52,15 @@ obj-y += setup.o + obj-y += signal.o + obj-y += syscall_table.o + obj-y += sys_riscv.o ++obj-y += sys_hwprobe.o + obj-y += time.o + obj-y += traps.o + obj-y += riscv_ksyms.o + obj-y += stacktrace.o + obj-y += cacheinfo.o + obj-y += patch.o ++obj-y += vendor_extensions.o ++obj-y += vendor_extensions/ + obj-y += probes/ + obj-$(CONFIG_MMU) += vdso.o vdso/ + +@@ -104,3 +107,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/ + obj-$(CONFIG_64BIT) += pi/ + obj-$(CONFIG_ACPI) += acpi.o + obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o ++obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o +diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c +index 07a43843368d..2fd29695a788 100644 +--- a/arch/riscv/kernel/acpi.c ++++ b/arch/riscv/kernel/acpi.c +@@ -14,9 +14,12 @@ + */ + + #include ++#include + #include ++#include ++#include + #include +-#include ++#include + + int acpi_noirq = 1; /* skip ACPI IRQ initialization */ + int acpi_disabled = 1; +@@ -130,7 +133,7 @@ void __init acpi_boot_table_init(void) + if (param_acpi_off || + (!param_acpi_on && !param_acpi_force && + efi.acpi20 == EFI_INVALID_TABLE_ADDR)) +- return; ++ goto done; + + /* + * ACPI is disabled at this point. Enable it in order to parse +@@ -150,6 +153,14 @@ void __init acpi_boot_table_init(void) + if (!param_acpi_force) + disable_acpi(); + } ++ ++done: ++ if (acpi_disabled) { ++ if (earlycon_acpi_spcr_enable) ++ early_init_dt_scan_chosen_stdout(); ++ } else { ++ acpi_parse_spcr(earlycon_acpi_spcr_enable, true); ++ } + } + + static int acpi_parse_madt_rintc(union acpi_subtable_headers *header, const unsigned long end) +@@ -190,11 +201,6 @@ struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) + return &cpu_madt_rintc[cpu]; + } + +-u32 get_acpi_id_for_cpu(int cpu) +-{ +- return acpi_cpu_get_madt_rintc(cpu)->uid; +-} +- + /* + * __acpi_map_table() will be called before paging_init(), so early_ioremap() + * or early_memremap() should be called here to for ACPI table mapping. +@@ -217,35 +223,114 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size) + + void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) + { +- return (void __iomem *)memremap(phys, size, MEMREMAP_WB); ++ efi_memory_desc_t *md, *region = NULL; ++ pgprot_t prot; ++ ++ if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP))) ++ return NULL; ++ ++ for_each_efi_memory_desc(md) { ++ u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); ++ ++ if (phys < md->phys_addr || phys >= end) ++ continue; ++ ++ if (phys + size > end) { ++ pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n"); ++ return NULL; ++ } ++ region = md; ++ break; ++ } ++ ++ /* ++ * It is fine for AML to remap regions that are not represented in the ++ * EFI memory map at all, as it only describes normal memory, and MMIO ++ * regions that require a virtual mapping to make them accessible to ++ * the EFI runtime services. 
++ */ ++ prot = PAGE_KERNEL_IO; ++ if (region) { ++ switch (region->type) { ++ case EFI_LOADER_CODE: ++ case EFI_LOADER_DATA: ++ case EFI_BOOT_SERVICES_CODE: ++ case EFI_BOOT_SERVICES_DATA: ++ case EFI_CONVENTIONAL_MEMORY: ++ case EFI_PERSISTENT_MEMORY: ++ if (memblock_is_map_memory(phys) || ++ !memblock_is_region_memory(phys, size)) { ++ pr_warn(FW_BUG "requested region covers kernel memory\n"); ++ return NULL; ++ } ++ ++ /* ++ * Mapping kernel memory is permitted if the region in ++ * question is covered by a single memblock with the ++ * NOMAP attribute set: this enables the use of ACPI ++ * table overrides passed via initramfs. ++ * This particular use case only requires read access. ++ */ ++ fallthrough; ++ ++ case EFI_RUNTIME_SERVICES_CODE: ++ /* ++ * This would be unusual, but not problematic per se, ++ * as long as we take care not to create a writable ++ * mapping for executable code. ++ */ ++ prot = PAGE_KERNEL_RO; ++ break; ++ ++ case EFI_ACPI_RECLAIM_MEMORY: ++ /* ++ * ACPI reclaim memory is used to pass firmware tables ++ * and other data that is intended for consumption by ++ * the OS only, which may decide it wants to reclaim ++ * that memory and use it for something else. We never ++ * do that, but we usually add it to the linear map ++ * anyway, in which case we should use the existing ++ * mapping. ++ */ ++ if (memblock_is_map_memory(phys)) ++ return (void __iomem *)__va(phys); ++ fallthrough; ++ ++ default: ++ if (region->attribute & EFI_MEMORY_WB) ++ prot = PAGE_KERNEL; ++ else if ((region->attribute & EFI_MEMORY_WC) || ++ (region->attribute & EFI_MEMORY_WT)) ++ prot = pgprot_writecombine(PAGE_KERNEL); ++ } ++ } ++ ++ return ioremap_prot(phys, size, pgprot_val(prot)); + } + + #ifdef CONFIG_PCI + + /* +- * These interfaces are defined just to enable building ACPI core. +- * TODO: Update it with actual implementation when external interrupt +- * controller support is added in RISC-V ACPI. ++ * raw_pci_read/write - Platform-specific PCI config space access. + */ +-int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, +- int reg, int len, u32 *val) ++int raw_pci_read(unsigned int domain, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 *val) + { +- return PCIBIOS_DEVICE_NOT_FOUND; +-} ++ struct pci_bus *b = pci_find_bus(domain, bus); + +-int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, +- int reg, int len, u32 val) +-{ +- return PCIBIOS_DEVICE_NOT_FOUND; ++ if (!b) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ return b->ops->read(b, devfn, reg, len, val); + } + +-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) ++int raw_pci_write(unsigned int domain, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 val) + { +- return -1; +-} ++ struct pci_bus *b = pci_find_bus(domain, bus); + +-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +-{ +- return NULL; ++ if (!b) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ return b->ops->write(b, devfn, reg, len, val); + } ++ + #endif /* CONFIG_PCI */ +diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c +new file mode 100644 +index 000000000000..1a97cbdafd01 +--- /dev/null ++++ b/arch/riscv/kernel/acpi_numa.c +@@ -0,0 +1,130 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * ACPI 6.6 based NUMA setup for RISCV ++ * Lots of code was borrowed from arch/arm64/kernel/acpi_numa.c ++ * ++ * Copyright 2004 Andi Kleen, SuSE Labs. ++ * Copyright (C) 2013-2016, Linaro Ltd. ++ * Author: Hanjun Guo ++ * Copyright (C) 2024 Intel Corporation. 
++ * ++ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs. ++ * ++ * Called from acpi_numa_init while reading the SRAT and SLIT tables. ++ * Assumes all memory regions belonging to a single proximity domain ++ * are in one chunk. Holes between them will be included in the node. ++ */ ++ ++#define pr_fmt(fmt) "ACPI: NUMA: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; ++ ++int __init acpi_numa_get_nid(unsigned int cpu) ++{ ++ return acpi_early_node_map[cpu]; ++} ++ ++static inline int get_cpu_for_acpi_id(u32 uid) ++{ ++ int cpu; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) ++ if (uid == get_acpi_id_for_cpu(cpu)) ++ return cpu; ++ ++ return -EINVAL; ++} ++ ++static int __init acpi_parse_rintc_pxm(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_srat_rintc_affinity *pa; ++ int cpu, pxm, node; ++ ++ if (srat_disabled()) ++ return -EINVAL; ++ pa = (struct acpi_srat_rintc_affinity *)header; ++ if (!pa) ++ return -EINVAL; ++ ++ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED)) ++ return 0; ++ ++ pxm = pa->proximity_domain; ++ node = pxm_to_node(pxm); ++ ++ /* ++ * If we can't map the UID to a logical cpu this ++ * means that the UID is not part of possible cpus ++ * so we do not need a NUMA mapping for it, skip ++ * the SRAT entry and keep parsing. ++ */ ++ cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid); ++ if (cpu < 0) ++ return 0; ++ ++ acpi_early_node_map[cpu] = node; ++ pr_info("SRAT: PXM %d -> HARTID 0x%lx -> Node %d\n", pxm, ++ cpuid_to_hartid_map(cpu), node); ++ ++ return 0; ++} ++ ++void __init acpi_map_cpus_to_nodes(void) ++{ ++ int i; ++ ++ /* ++ * In ACPI, SMP and CPU NUMA information is provided in separate ++ * static tables, namely the MADT and the SRAT. ++ * ++ * Thus, it is simpler to first create the cpu logical map through ++ * an MADT walk and then map the logical cpus to their node ids ++ * as separate steps. 
++ */ ++ acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), ++ ACPI_SRAT_TYPE_RINTC_AFFINITY, acpi_parse_rintc_pxm, 0); ++ ++ for (i = 0; i < nr_cpu_ids; i++) ++ early_map_cpu_to_node(i, acpi_numa_get_nid(i)); ++} ++ ++/* Callback for Proximity Domain -> logical node ID mapping */ ++void __init acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) ++{ ++ int pxm, node; ++ ++ if (srat_disabled()) ++ return; ++ ++ if (pa->header.length < sizeof(struct acpi_srat_rintc_affinity)) { ++ pr_err("SRAT: Invalid SRAT header length: %d\n", pa->header.length); ++ bad_srat(); ++ return; ++ } ++ ++ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED)) ++ return; ++ ++ pxm = pa->proximity_domain; ++ node = acpi_map_pxm_to_node(pxm); ++ ++ if (node == NUMA_NO_NODE) { ++ pr_err("SRAT: Too many proximity domains %d\n", pxm); ++ bad_srat(); ++ return; ++ } ++ ++ node_set(node, numa_nodes_parsed); ++} +diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c +index 319a1da0358b..0128b161bfda 100644 +--- a/arch/riscv/kernel/alternative.c ++++ b/arch/riscv/kernel/alternative.c +@@ -43,7 +43,7 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info + + switch (cpu_mfr_info->vendor_id) { + #ifdef CONFIG_ERRATA_ANDES +- case ANDESTECH_VENDOR_ID: ++ case ANDES_VENDOR_ID: + cpu_mfr_info->patch_func = andes_errata_patch_func; + break; + #endif diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c -index bb5fb2b820a2..6fdb7d166a41 100644 +index bb5fb2b820a2..820f579e4581 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c -@@ -176,6 +176,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include "copy-unaligned.h" + +@@ -32,6 +33,8 @@ + #define MISALIGNED_BUFFER_SIZE 0x4000 + #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) + ++static bool any_cpu_has_zicboz; ++ + unsigned long elf_hwcap __read_mostly; + + /* Host ISA bitmap */ +@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base); + * + * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. + */ +-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) ++bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit) + { + const unsigned long *bmap = (isa_bitmap) ? 
isa_bitmap : riscv_isa; + +@@ -80,37 +83,204 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) + } + EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); + +-static bool riscv_isa_extension_check(int id) ++static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) + { +- switch (id) { +- case RISCV_ISA_EXT_ZICBOM: +- if (!riscv_cbom_block_size) { +- pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); +- return false; +- } else if (!is_power_of_2(riscv_cbom_block_size)) { +- pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); +- return false; +- } +- return true; +- case RISCV_ISA_EXT_ZICBOZ: +- if (!riscv_cboz_block_size) { +- pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n"); +- return false; +- } else if (!is_power_of_2(riscv_cboz_block_size)) { +- pr_err("cboz-block-size present, but is not a power-of-2\n"); +- return false; +- } +- return true; ++ if (!riscv_cbom_block_size) { ++ pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); ++ return -EINVAL; ++ } ++ if (!is_power_of_2(riscv_cbom_block_size)) { ++ pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); ++ return -EINVAL; + } ++ return 0; ++} + +- return true; ++static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (!riscv_cboz_block_size) { ++ pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n"); ++ return -EINVAL; ++ } ++ if (!is_power_of_2(riscv_cboz_block_size)) { ++ pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n"); ++ return -EINVAL; ++ } ++ any_cpu_has_zicboz = true; ++ return 0; + } + +-#define __RISCV_ISA_EXT_DATA(_name, _id) { \ +- .name = #_name, \ +- .property = #_name, \ +- .id = _id, \ ++static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA)) ++ return 0; ++ ++ return -EPROBE_DEFER; + } ++static int riscv_ext_zcd_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int riscv_ext_zcf_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (IS_ENABLED(CONFIG_64BIT)) ++ return -EINVAL; ++ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_f)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ /* SVADE has already been detected, use SVADE only */ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_SVADE)) ++ return -EOPNOTSUPP; ++ ++ return 0; ++} ++ ++static const unsigned int riscv_zk_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZBKX, ++ RISCV_ISA_EXT_ZKND, ++ RISCV_ISA_EXT_ZKNE, ++ RISCV_ISA_EXT_ZKR, ++ RISCV_ISA_EXT_ZKT, ++}; ++ ++static const unsigned int riscv_zkn_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZBKX, ++ RISCV_ISA_EXT_ZKND, ++ RISCV_ISA_EXT_ZKNE, ++ RISCV_ISA_EXT_ZKNH, ++}; ++ ++static const 
unsigned int riscv_zks_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZKSED, ++ RISCV_ISA_EXT_ZKSH ++}; ++ ++#define RISCV_ISA_EXT_ZVKN \ ++ RISCV_ISA_EXT_ZVKNED, \ ++ RISCV_ISA_EXT_ZVKNHB, \ ++ RISCV_ISA_EXT_ZVKB, \ ++ RISCV_ISA_EXT_ZVKT ++ ++static const unsigned int riscv_zvkn_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN ++}; ++ ++static const unsigned int riscv_zvknc_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN, ++ RISCV_ISA_EXT_ZVBC ++}; ++ ++static const unsigned int riscv_zvkng_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN, ++ RISCV_ISA_EXT_ZVKG ++}; ++ ++#define RISCV_ISA_EXT_ZVKS \ ++ RISCV_ISA_EXT_ZVKSED, \ ++ RISCV_ISA_EXT_ZVKSH, \ ++ RISCV_ISA_EXT_ZVKB, \ ++ RISCV_ISA_EXT_ZVKT ++ ++static const unsigned int riscv_zvks_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS ++}; ++ ++static const unsigned int riscv_zvksc_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS, ++ RISCV_ISA_EXT_ZVBC ++}; ++ ++static const unsigned int riscv_zvksg_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS, ++ RISCV_ISA_EXT_ZVKG ++}; ++ ++static const unsigned int riscv_zvbb_exts[] = { ++ RISCV_ISA_EXT_ZVKB ++}; ++ ++#define RISCV_ISA_EXT_ZVE64F_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64X, \ ++ RISCV_ISA_EXT_ZVE32F, \ ++ RISCV_ISA_EXT_ZVE32X ++ ++#define RISCV_ISA_EXT_ZVE64D_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64F, \ ++ RISCV_ISA_EXT_ZVE64F_IMPLY_LIST ++ ++#define RISCV_ISA_EXT_V_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64D, \ ++ RISCV_ISA_EXT_ZVE64D_IMPLY_LIST ++ ++static const unsigned int riscv_zve32f_exts[] = { ++ RISCV_ISA_EXT_ZVE32X ++}; ++ ++static const unsigned int riscv_zve64f_exts[] = { ++ RISCV_ISA_EXT_ZVE64F_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_zve64d_exts[] = { ++ RISCV_ISA_EXT_ZVE64D_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_v_exts[] = { ++ RISCV_ISA_EXT_V_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_zve64x_exts[] = { ++ RISCV_ISA_EXT_ZVE32X, ++ RISCV_ISA_EXT_ZVE64X ++}; ++ ++/* ++ * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V ++ * privileged ISA, the existence of the CSRs is implied by any extension which ++ * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the ++ * existence of the CSR, and treat it as a subset of those other extensions. ++ */ ++static const unsigned int riscv_xlinuxenvcfg_exts[] = { ++ RISCV_ISA_EXT_XLINUXENVCFG ++}; ++ ++/* ++ * Zc* spec states that: ++ * - C always implies Zca ++ * - C+F implies Zcf (RV32 only) ++ * - C+D implies Zcd ++ * ++ * These extensions will be enabled and then validated depending on the ++ * availability of F/D RV32. 
++ */ ++static const unsigned int riscv_c_exts[] = { ++ RISCV_ISA_EXT_ZCA, ++ RISCV_ISA_EXT_ZCF, ++ RISCV_ISA_EXT_ZCD, ++}; + + /* + * The canonical order of ISA extension names in the ISA string is defined in +@@ -158,36 +328,177 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { + __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f), + __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d), + __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q), +- __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c), +- __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b), +- __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k), +- __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j), +- __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p), +- __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v), ++ __RISCV_ISA_EXT_SUPERSET(c, RISCV_ISA_EXT_c, riscv_c_exts), ++ __RISCV_ISA_EXT_SUPERSET(v, RISCV_ISA_EXT_v, riscv_v_exts), + __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h), +- __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), +- __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), ++ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts, ++ riscv_ext_zicbom_validate), ++ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, ++ riscv_ext_zicboz_validate), ++ __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), + __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), ++ __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), + __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), + __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI), ++ __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL), + __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), + __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), ++ __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), ++ __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), ++ __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), ++ __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), ++ __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), ++ __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), ++ __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), ++ __RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcb, RISCV_ISA_EXT_ZCB, riscv_ext_zca_depends), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcd, RISCV_ISA_EXT_ZCD, riscv_ext_zcd_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcf, RISCV_ISA_EXT_ZCF, riscv_ext_zcf_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcmop, RISCV_ISA_EXT_ZCMOP, riscv_ext_zca_depends), + __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA), __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB), ++ __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC), ++ __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB), ++ __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC), ++ __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX), __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS), ++ __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND), ++ __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE), ++ __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH), ++ __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR), ++ __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT), ++ __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED), ++ __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH), ++ __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO), ++ __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts), ++ __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC), ++ __RISCV_ISA_EXT_SUPERSET(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts), ++ 
__RISCV_ISA_EXT_DATA(zve32x, RISCV_ISA_EXT_ZVE32X), ++ __RISCV_ISA_EXT_SUPERSET(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts), ++ __RISCV_ISA_EXT_SUPERSET(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts), ++ __RISCV_ISA_EXT_SUPERSET(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts), ++ __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH), ++ __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN), ++ __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB), ++ __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG), ++ __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED), ++ __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA), ++ __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB), ++ __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED), ++ __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH), ++ __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), ++ __RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM), ++ __RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts), + __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), ++ __RISCV_ISA_EXT_SUPERSET(ssnpm, RISCV_ISA_EXT_SSNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), ++ __RISCV_ISA_EXT_DATA(svade, RISCV_ISA_EXT_SVADE), ++ __RISCV_ISA_EXT_DATA_VALIDATE(svadu, RISCV_ISA_EXT_SVADU, riscv_ext_svadu_validate), + __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), + __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), + __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), ++ __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC), + }; + + const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext); + +-static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo, +- unsigned long *isa2hwcap, const char *isa) ++static void riscv_isa_set_ext(const struct riscv_isa_ext_data *ext, unsigned long *bitmap) ++{ ++ if (ext->id != RISCV_ISA_EXT_INVALID) ++ set_bit(ext->id, bitmap); ++ ++ for (int i = 0; i < ext->subset_ext_size; i++) { ++ if (ext->subset_ext_ids[i] != RISCV_ISA_EXT_INVALID) ++ set_bit(ext->subset_ext_ids[i], bitmap); ++ } ++} ++ ++static const struct riscv_isa_ext_data *riscv_get_isa_ext_data(unsigned int ext_id) ++{ ++ for (int i = 0; i < riscv_isa_ext_count; i++) { ++ if (riscv_isa_ext[i].id == ext_id) ++ return &riscv_isa_ext[i]; ++ } ++ ++ return NULL; ++} ++ ++/* ++ * "Resolve" a source ISA bitmap into one that matches kernel configuration as ++ * well as correct extension dependencies. Some extensions depends on specific ++ * kernel configuration to be usable (V needs CONFIG_RISCV_ISA_V for instance) ++ * and this function will actually validate all the extensions provided in ++ * source_isa into the resolved_isa based on extensions validate() callbacks. 
++ */ ++static void __init riscv_resolve_isa(unsigned long *source_isa, ++ unsigned long *resolved_isa, unsigned long *this_hwcap, ++ unsigned long *isa2hwcap) ++{ ++ bool loop; ++ const struct riscv_isa_ext_data *ext; ++ DECLARE_BITMAP(prev_resolved_isa, RISCV_ISA_EXT_MAX); ++ int max_loop_count = riscv_isa_ext_count, ret; ++ unsigned int bit; ++ ++ do { ++ loop = false; ++ if (max_loop_count-- < 0) { ++ pr_err("Failed to reach a stable ISA state\n"); ++ return; ++ } ++ bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX); ++ for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) { ++ ext = riscv_get_isa_ext_data(bit); ++ ++ if (ext && ext->validate) { ++ ret = ext->validate(ext, resolved_isa); ++ if (ret == -EPROBE_DEFER) { ++ loop = true; ++ continue; ++ } else if (ret) { ++ /* Disable the extension entirely */ ++ clear_bit(bit, source_isa); ++ continue; ++ } ++ } ++ ++ set_bit(bit, resolved_isa); ++ /* No need to keep it in source isa now that it is enabled */ ++ clear_bit(bit, source_isa); ++ ++ /* Single letter extensions get set in hwcap */ ++ if (bit < RISCV_ISA_EXT_BASE) ++ *this_hwcap |= isa2hwcap[bit]; ++ } ++ } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); ++} ++ ++static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap) ++{ ++ for (int i = 0; i < riscv_isa_ext_count; i++) { ++ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; ++ ++ if ((name_end - name == strlen(ext->name)) && ++ !strncasecmp(name, ext->name, name_end - name)) { ++ riscv_isa_set_ext(ext, bitmap); ++ break; ++ } ++ } ++} ++ ++static void __init riscv_parse_isa_string(const char *isa, unsigned long *bitmap) + { + /* + * For all possible cpus, we have already validated in +@@ -200,15 +511,31 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + while (*isa) { + const char *ext = isa++; + const char *ext_end = isa; +- bool ext_long = false, ext_err = false; ++ bool ext_err = false; + + switch (*ext) { ++ case 'x': ++ case 'X': ++ if (acpi_disabled) ++ pr_warn_once("Vendor extensions are ignored in riscv,isa. Use riscv,isa-extensions instead."); ++ /* ++ * To skip an extension, we find its end. ++ * As multi-letter extensions must be split from other multi-letter ++ * extensions with an "_", the end of a multi-letter extension will ++ * either be the null character or the "_" at the start of the next ++ * multi-letter extension. ++ */ ++ for (; *isa && *isa != '_'; ++isa) ++ ; ++ ext_err = true; ++ break; + case 's': + /* +- * Workaround for invalid single-letter 's' & 'u'(QEMU). ++ * Workaround for invalid single-letter 's' & 'u' (QEMU). + * No need to set the bit in riscv_isa as 's' & 'u' are +- * not valid ISA extensions. It works until multi-letter +- * extension starting with "Su" appears. ++ * not valid ISA extensions. It works unless the first ++ * multi-letter extension in the ISA string begins with ++ * "Su" and is not prefixed with an underscore. + */ + if (ext[-1] != '_' && ext[1] == 'u') { + ++isa; +@@ -217,8 +544,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + } + fallthrough; + case 'S': +- case 'x': +- case 'X': + case 'z': + case 'Z': + /* +@@ -239,7 +564,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + * character itself while eliminating the extensions version number. + * A simple re-increment solves this problem. 
+ */ +- ext_long = true; + for (; *isa && *isa != '_'; ++isa) + if (unlikely(!isalnum(*isa))) + ext_err = true; +@@ -317,29 +641,10 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + if (*isa == '_') + ++isa; + +-#define SET_ISA_EXT_MAP(name, bit) \ +- do { \ +- if ((ext_end - ext == strlen(name)) && \ +- !strncasecmp(ext, name, strlen(name)) && \ +- riscv_isa_extension_check(bit)) \ +- set_bit(bit, isainfo->isa); \ +- } while (false) \ +- + if (unlikely(ext_err)) + continue; +- if (!ext_long) { +- int nr = tolower(*ext) - 'a'; + +- if (riscv_isa_extension_check(nr)) { +- *this_hwcap |= isa2hwcap[nr]; +- set_bit(nr, isainfo->isa); +- } +- } else { +- for (int i = 0; i < riscv_isa_ext_count; i++) +- SET_ISA_EXT_MAP(riscv_isa_ext[i].name, +- riscv_isa_ext[i].id); +- } +-#undef SET_ISA_EXT_MAP ++ match_isa_ext(ext, ext_end, bitmap); + } + } + +@@ -366,6 +671,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + for_each_possible_cpu(cpu) { + struct riscv_isainfo *isainfo = &hart_isa[cpu]; + unsigned long this_hwcap = 0; ++ DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; + + if (acpi_disabled) { + node = of_cpu_device_node_get(cpu); +@@ -388,7 +694,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + } + } + +- riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa); ++ riscv_parse_isa_string(isa, source_isa); + + /* + * These ones were as they were part of the base ISA when the +@@ -396,10 +702,10 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + * unconditionally where `i` is in riscv,isa on DT systems. + */ + if (acpi_disabled) { +- set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa); ++ set_bit(RISCV_ISA_EXT_ZICSR, source_isa); ++ set_bit(RISCV_ISA_EXT_ZIFENCEI, source_isa); ++ set_bit(RISCV_ISA_EXT_ZICNTR, source_isa); ++ set_bit(RISCV_ISA_EXT_ZIHPM, source_isa); + } + + /* +@@ -412,9 +718,11 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + */ + if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) { + this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v]; +- clear_bit(RISCV_ISA_EXT_v, isainfo->isa); ++ clear_bit(RISCV_ISA_EXT_v, source_isa); + } + ++ riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); ++ + /* + * All "okay" hart should have same isa. Set HWCAP based on + * common capabilities of every "okay" hart, in case they don't +@@ -435,6 +743,61 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + acpi_put_table((struct acpi_table_header *)rhct); + } + ++static void __init riscv_fill_cpu_vendor_ext(struct device_node *cpu_node, int cpu) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return; ++ ++ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { ++ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; ++ ++ for (int j = 0; j < ext_list->ext_data_count; j++) { ++ const struct riscv_isa_ext_data ext = ext_list->ext_data[j]; ++ struct riscv_isavendorinfo *isavendorinfo = &ext_list->per_hart_isa_bitmap[cpu]; ++ ++ if (of_property_match_string(cpu_node, "riscv,isa-extensions", ++ ext.property) < 0) ++ continue; ++ ++ /* ++ * Assume that subset extensions are all members of the ++ * same vendor. 
++ */ ++ if (ext.subset_ext_size) ++ for (int k = 0; k < ext.subset_ext_size; k++) ++ set_bit(ext.subset_ext_ids[k], isavendorinfo->isa); ++ ++ set_bit(ext.id, isavendorinfo->isa); ++ } ++ } ++} ++ ++/* ++ * Populate all_harts_isa_bitmap for each vendor with all of the extensions that ++ * are shared across CPUs for that vendor. ++ */ ++static void __init riscv_fill_vendor_ext_list(int cpu) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return; ++ ++ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { ++ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; ++ ++ if (!ext_list->is_initialized) { ++ bitmap_copy(ext_list->all_harts_isa_bitmap.isa, ++ ext_list->per_hart_isa_bitmap[cpu].isa, ++ RISCV_ISA_VENDOR_EXT_MAX); ++ ext_list->is_initialized = true; ++ } else { ++ bitmap_and(ext_list->all_harts_isa_bitmap.isa, ++ ext_list->all_harts_isa_bitmap.isa, ++ ext_list->per_hart_isa_bitmap[cpu].isa, ++ RISCV_ISA_VENDOR_EXT_MAX); ++ } ++ } ++} ++ + static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + { + unsigned int cpu; +@@ -443,6 +806,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + unsigned long this_hwcap = 0; + struct device_node *cpu_node; + struct riscv_isainfo *isainfo = &hart_isa[cpu]; ++ DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; + + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) { +@@ -456,20 +820,18 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + } + + for (int i = 0; i < riscv_isa_ext_count; i++) { +- if (of_property_match_string(cpu_node, "riscv,isa-extensions", +- riscv_isa_ext[i].property) < 0) +- continue; ++ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; + +- if (!riscv_isa_extension_check(riscv_isa_ext[i].id)) ++ if (of_property_match_string(cpu_node, "riscv,isa-extensions", ++ ext->property) < 0) + continue; + +- /* Only single letter extensions get set in hwcap */ +- if (strnlen(riscv_isa_ext[i].name, 2) == 1) +- this_hwcap |= isa2hwcap[riscv_isa_ext[i].id]; +- +- set_bit(riscv_isa_ext[i].id, isainfo->isa); ++ riscv_isa_set_ext(ext, source_isa); + } + ++ riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); ++ riscv_fill_cpu_vendor_ext(cpu_node, cpu); ++ + of_node_put(cpu_node); + + /* +@@ -485,6 +847,8 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); + else + bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); ++ ++ riscv_fill_vendor_ext_list(cpu); + } + + if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) +@@ -539,8 +903,14 @@ void __init riscv_fill_hwcap(void) + elf_hwcap &= ~COMPAT_HWCAP_ISA_F; + } + +- if (elf_hwcap & COMPAT_HWCAP_ISA_V) { ++ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X)) { ++ /* ++ * This cannot fail when called on the boot hart ++ */ + riscv_v_setup_vsize(); ++ } ++ ++ if (elf_hwcap & COMPAT_HWCAP_ISA_V) { + /* + * ISA string in device tree might have 'v' flag, but + * CONFIG_RISCV_ISA_V is disabled in kernel. 
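Nothing in the hunks above changes how the legacy ELF auxiliary vector looks to userspace: elf_hwcap is still the intersection of every hart's single-letter extensions, one bit per letter (bit 0 for 'a', bit 21 for 'v', and so on), with V additionally gated on the kernel's own vector support as the hunk above shows. A minimal userspace sketch of consuming it; anything finer grained than the single-letter base extensions has to come from the hwprobe interface added later in this patch:

#include <stdio.h>
#include <sys/auxv.h>

#define ISA_HWCAP(letter)	(1UL << ((letter) - 'a'))

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("compressed: %s\n", hwcap & ISA_HWCAP('c') ? "yes" : "no");
	printf("vector:     %s\n", hwcap & ISA_HWCAP('v') ? "yes" : "no");
	return 0;
}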
+@@ -668,7 +1038,7 @@ void check_unaligned_access(int cpu) + __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE)); + } + +-static int check_unaligned_access_boot_cpu(void) ++static int __init check_unaligned_access_boot_cpu(void) + { + check_unaligned_access(0); + return 0; +@@ -676,6 +1046,14 @@ static int check_unaligned_access_boot_cpu(void) + + arch_initcall(check_unaligned_access_boot_cpu); + ++void __init riscv_user_isa_enable(void) ++{ ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) ++ csr_set(CSR_ENVCFG, ENVCFG_CBZE); ++ else if (any_cpu_has_zicboz) ++ pr_warn("Zicboz disabled as it is unavailable on some harts\n"); ++} ++ + #ifdef CONFIG_RISCV_ALTERNATIVE + /* + * Alternative patch sites consider 48 bits when determining when to patch +@@ -716,28 +1094,45 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, + { + struct alt_entry *alt; + void *oldptr, *altptr; +- u16 id, value; ++ u16 id, value, vendor; + + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return; + + for (alt = begin; alt < end; alt++) { +- if (alt->vendor_id != 0) +- continue; +- + id = PATCH_ID_CPUFEATURE_ID(alt->patch_id); ++ vendor = PATCH_ID_CPUFEATURE_ID(alt->vendor_id); + +- if (id >= RISCV_ISA_EXT_MAX) { +- WARN(1, "This extension id:%d is not in ISA extension list", id); +- continue; +- } ++ /* ++ * Any alternative with a patch_id that is less than ++ * RISCV_ISA_EXT_MAX is interpreted as a standard extension. ++ * ++ * Any alternative with patch_id that is greater than or equal ++ * to RISCV_VENDOR_EXT_ALTERNATIVES_BASE is interpreted as a ++ * vendor extension. ++ */ ++ if (id < RISCV_ISA_EXT_MAX) { ++ /* ++ * This patch should be treated as errata so skip ++ * processing here. ++ */ ++ if (alt->vendor_id != 0) ++ continue; + +- if (!__riscv_isa_extension_available(NULL, id)) +- continue; ++ if (!__riscv_isa_extension_available(NULL, id)) ++ continue; + +- value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); +- if (!riscv_cpufeature_patch_check(id, value)) ++ value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); ++ if (!riscv_cpufeature_patch_check(id, value)) ++ continue; ++ } else if (id >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE) { ++ if (!__riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ++ id - RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ continue; ++ } else { ++ WARN(1, "This extension id:%d is not in ISA extension list", id); + continue; ++ } + + oldptr = ALT_OLD_PTR(alt); + altptr = ALT_ALT_PTR(alt); diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index df4f6fec5d17..ced5a09abaaa 100644 --- a/arch/riscv/kernel/module.c @@ -22188,11 +36910,129 @@ index a4559695ce62..0e2e19352469 100644 + return 0; } +IRQCHIP_DECLARE(riscv_aclint_sswi, "riscv,aclint-sswi", aclint_sswi_probe); +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c +index c2cdf812ebd0..d949fd3c0884 100644 +--- a/arch/riscv/kernel/setup.c ++++ b/arch/riscv/kernel/setup.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -299,17 +300,22 @@ void __init setup_arch(char **cmdline_p) + setup_smp(); + #endif + +- if (!acpi_disabled) ++ if (!acpi_disabled) { + acpi_init_rintc_map(); ++ acpi_map_cpus_to_nodes(); ++ } + + riscv_init_cbo_blocksizes(); + riscv_fill_hwcap(); + init_rt_signal_env(); + apply_boot_alternatives(); ++ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && + riscv_isa_extension_available(NULL, ZICBOM)) + riscv_noncoherent_supported(); + riscv_set_dma_cache_alignment(); ++ ++ riscv_user_isa_enable(); + } + + 
bool arch_cpu_is_hotpluggable(int cpu) +diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c +index 40420afbb1a0..ccb0f93c9786 100644 +--- a/arch/riscv/kernel/smp.c ++++ b/arch/riscv/kernel/smp.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + enum ipi_message_type { + IPI_RESCHEDULE, +@@ -33,6 +34,7 @@ enum ipi_message_type { + IPI_CPU_CRASH_STOP, + IPI_IRQ_WORK, + IPI_TIMER, ++ IPI_CPU_BACKTRACE, + IPI_MAX + }; + +@@ -136,6 +138,9 @@ static irqreturn_t handle_IPI(int irq, void *data) + tick_receive_broadcast(); + break; + #endif ++ case IPI_CPU_BACKTRACE: ++ nmi_cpu_backtrace(get_irq_regs()); ++ break; + default: + pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi); + break; +@@ -212,6 +217,7 @@ static const char * const ipi_names[] = { + [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", + [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", ++ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", + }; + + void show_ipi_stats(struct seq_file *p, int prec) +@@ -332,3 +338,14 @@ void arch_smp_send_reschedule(int cpu) + send_ipi_single(cpu, IPI_RESCHEDULE); + } + EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); ++ ++static void riscv_backtrace_ipi(cpumask_t *mask) ++{ ++ send_ipi_mask(mask, IPI_CPU_BACKTRACE); ++} ++ ++bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) ++{ ++ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, riscv_backtrace_ipi); ++ return true; ++} +diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c +index 1b8da4e40a4d..3f37eec7a790 100644 +--- a/arch/riscv/kernel/smpboot.c ++++ b/arch/riscv/kernel/smpboot.c +@@ -25,6 +25,8 @@ + #include + #include + #include ++ ++#include + #include + #include + #include +@@ -105,7 +107,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un + if (hart == cpuid_to_hartid_map(0)) { + BUG_ON(found_boot_cpu); + found_boot_cpu = true; +- early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count)); + return 0; + } + +@@ -115,7 +116,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un + } + + cpuid_to_hartid_map(cpu_count) = hart; +- early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count)); + cpu_count++; + + return 0; diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c -index 3c89b8ec69c4..239509367e42 100644 +index 3c89b8ec69c4..9a8a0dc035b2 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c -@@ -4,8 +4,12 @@ +@@ -4,13 +4,18 @@ * Copyright (c) 2022 Ventana Micro Systems Inc. 
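The smp.c hunk above does more than add an IPI name: with IPI_CPU_BACKTRACE wired into handle_IPI() and arch_trigger_cpumask_backtrace() built on nmi_trigger_cpumask_backtrace(), generic users such as the sysrq 'l' handler can now make remote harts dump their stacks. A hedged sketch of an in-kernel call site (the helper name is illustrative):

#include <linux/nmi.h>
#include <linux/printk.h>

static void dump_remote_harts(void)
{
	/*
	 * trigger_all_cpu_backtrace() ends up in the new
	 * arch_trigger_cpumask_backtrace() above, which raises
	 * IPI_CPU_BACKTRACE on the other online harts; it returns false on
	 * architectures that do not provide the hook.
	 */
	if (!trigger_all_cpu_backtrace())
		dump_stack();	/* at least show the local stack */
}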
*/ @@ -22205,7 +37045,25 @@ index 3c89b8ec69c4..239509367e42 100644 #include void suspend_save_csrs(struct suspend_context *context) -@@ -85,3 +89,43 @@ int cpu_suspend(unsigned long arg, + { +- context->scratch = csr_read(CSR_SCRATCH); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) ++ context->envcfg = csr_read(CSR_ENVCFG); + context->tvec = csr_read(CSR_TVEC); + context->ie = csr_read(CSR_IE); + +@@ -31,7 +36,9 @@ void suspend_save_csrs(struct suspend_context *context) + + void suspend_restore_csrs(struct suspend_context *context) + { +- csr_write(CSR_SCRATCH, context->scratch); ++ csr_write(CSR_SCRATCH, 0); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) ++ csr_write(CSR_ENVCFG, context->envcfg); + csr_write(CSR_TVEC, context->tvec); + csr_write(CSR_IE, context->ie); + +@@ -85,3 +92,92 @@ int cpu_suspend(unsigned long arg, return rc; } @@ -22248,9 +37106,817 @@ index 3c89b8ec69c4..239509367e42 100644 +} + +arch_initcall(sbi_system_suspend_init); ++ ++static int sbi_suspend_finisher(unsigned long suspend_type, ++ unsigned long resume_addr, ++ unsigned long opaque) ++{ ++ struct sbiret ret; ++ ++ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, ++ suspend_type, resume_addr, opaque, 0, 0, 0); ++ ++ return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0; ++} ++ ++int riscv_sbi_hart_suspend(u32 state) ++{ ++ if (state & SBI_HSM_SUSP_NON_RET_BIT) ++ return cpu_suspend(state, sbi_suspend_finisher); ++ else ++ return sbi_suspend_finisher(state, 0, 0); ++} ++ ++bool riscv_sbi_suspend_state_is_valid(u32 state) ++{ ++ if (state > SBI_HSM_SUSPEND_RET_DEFAULT && ++ state < SBI_HSM_SUSPEND_RET_PLATFORM) ++ return false; ++ ++ if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && ++ state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) ++ return false; ++ ++ return true; ++} ++ ++bool riscv_sbi_hsm_is_supported(void) ++{ ++ /* ++ * The SBI HSM suspend function is only available when: ++ * 1) SBI version is 0.3 or higher ++ * 2) SBI HSM extension is available ++ */ ++ if (sbi_spec_version < sbi_mk_version(0, 3) || ++ !sbi_probe_extension(SBI_EXT_HSM)) { ++ pr_info("HSM suspend not available\n"); ++ return false; ++ } ++ ++ return true; ++} +#endif /* CONFIG_RISCV_SBI */ +diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c +new file mode 100644 +index 000000000000..052a41f53dc2 +--- /dev/null ++++ b/arch/riscv/kernel/sys_hwprobe.c +@@ -0,0 +1,349 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * The hwprobe interface, for allowing userspace to probe to see which features ++ * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for ++ * more details. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++static void hwprobe_arch_id(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ u64 id = -1ULL; ++ bool first = true; ++ int cpu; ++ ++ for_each_cpu(cpu, cpus) { ++ u64 cpu_id; ++ ++ switch (pair->key) { ++ case RISCV_HWPROBE_KEY_MVENDORID: ++ cpu_id = riscv_cached_mvendorid(cpu); ++ break; ++ case RISCV_HWPROBE_KEY_MIMPID: ++ cpu_id = riscv_cached_mimpid(cpu); ++ break; ++ case RISCV_HWPROBE_KEY_MARCHID: ++ cpu_id = riscv_cached_marchid(cpu); ++ break; ++ } ++ ++ if (first) { ++ id = cpu_id; ++ first = false; ++ } ++ ++ /* ++ * If there's a mismatch for the given set, return -1 in the ++ * value. 
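Stepping back to the SBI HSM helpers just added to suspend.c: their intended consumer is a cpuidle backend (upstream, the cpuidle-riscv-sbi driver fills that role), which probes for HSM support once and then hands validated, platform-defined state values to riscv_sbi_hart_suspend(). A hedged sketch, with the calling function and the state value purely illustrative:

static int enter_sbi_idle_state(u32 state)
{
	if (!riscv_sbi_hsm_is_supported())
		return -ENODEV;

	if (!riscv_sbi_suspend_state_is_valid(state))
		return -EINVAL;

	/*
	 * Non-retentive states (SBI_HSM_SUSP_NON_RET_BIT set) go through
	 * cpu_suspend() above so the hart context is saved and restored;
	 * retentive states call straight into the SBI.
	 */
	return riscv_sbi_hart_suspend(state);
}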
++ */ ++ if (id != cpu_id) { ++ id = -1ULL; ++ break; ++ } ++ } ++ ++ pair->value = id; ++} ++ ++static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ int cpu; ++ u64 missing = 0; ++ ++ pair->value = 0; ++ if (has_fpu()) ++ pair->value |= RISCV_HWPROBE_IMA_FD; ++ ++ if (riscv_isa_extension_available(NULL, c)) ++ pair->value |= RISCV_HWPROBE_IMA_C; ++ ++ if (has_vector()) ++ pair->value |= RISCV_HWPROBE_IMA_V; ++ ++ /* ++ * Loop through and record extensions that 1) anyone has, and 2) anyone ++ * doesn't have. ++ */ ++ for_each_cpu(cpu, cpus) { ++ struct riscv_isainfo *isainfo = &hart_isa[cpu]; ++ ++#define EXT_KEY(ext) \ ++ do { \ ++ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ ++ pair->value |= RISCV_HWPROBE_EXT_##ext; \ ++ else \ ++ missing |= RISCV_HWPROBE_EXT_##ext; \ ++ } while (false) ++ ++ /* ++ * Only use EXT_KEY() for extensions which can be exposed to userspace, ++ * regardless of the kernel's configuration, as no other checks, besides ++ * presence in the hart_isa bitmap, are made. ++ */ ++ EXT_KEY(ZACAS); ++ EXT_KEY(ZAWRS); ++ EXT_KEY(ZBA); ++ EXT_KEY(ZBB); ++ EXT_KEY(ZBC); ++ EXT_KEY(ZBKB); ++ EXT_KEY(ZBKC); ++ EXT_KEY(ZBKX); ++ EXT_KEY(ZBS); ++ EXT_KEY(ZCA); ++ EXT_KEY(ZCB); ++ EXT_KEY(ZCMOP); ++ EXT_KEY(ZICBOZ); ++ EXT_KEY(ZICOND); ++ EXT_KEY(ZIHINTNTL); ++ EXT_KEY(ZIHINTPAUSE); ++ EXT_KEY(ZIMOP); ++ EXT_KEY(ZKND); ++ EXT_KEY(ZKNE); ++ EXT_KEY(ZKNH); ++ EXT_KEY(ZKSED); ++ EXT_KEY(ZKSH); ++ EXT_KEY(ZKT); ++ EXT_KEY(ZTSO); ++ ++ if (has_vector()) { ++ EXT_KEY(ZVBB); ++ EXT_KEY(ZVBC); ++ EXT_KEY(ZVE32F); ++ EXT_KEY(ZVE32X); ++ EXT_KEY(ZVE64D); ++ EXT_KEY(ZVE64F); ++ EXT_KEY(ZVE64X); ++ EXT_KEY(ZVFH); ++ EXT_KEY(ZVFHMIN); ++ EXT_KEY(ZVKB); ++ EXT_KEY(ZVKG); ++ EXT_KEY(ZVKNED); ++ EXT_KEY(ZVKNHA); ++ EXT_KEY(ZVKNHB); ++ EXT_KEY(ZVKSED); ++ EXT_KEY(ZVKSH); ++ EXT_KEY(ZVKT); ++ } ++ ++ if (has_fpu()) { ++ EXT_KEY(ZCD); ++ EXT_KEY(ZCF); ++ EXT_KEY(ZFA); ++ EXT_KEY(ZFH); ++ EXT_KEY(ZFHMIN); ++ } ++ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) ++ EXT_KEY(SUPM); ++#undef EXT_KEY ++ } ++ ++ /* Now turn off reporting features if any CPU is missing it. */ ++ pair->value &= ~missing; ++} ++ ++static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) ++{ ++ struct riscv_hwprobe pair; ++ ++ hwprobe_isa_ext0(&pair, cpus); ++ return (pair.value & ext); ++} ++ ++static u64 hwprobe_misaligned(const struct cpumask *cpus) ++{ ++ int cpu; ++ u64 perf = -1ULL; ++ ++ for_each_cpu(cpu, cpus) { ++ int this_perf = per_cpu(misaligned_access_speed, cpu); ++ ++ if (perf == -1ULL) ++ perf = this_perf; ++ ++ if (perf != this_perf) { ++ perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; ++ break; ++ } ++ } ++ ++ if (perf == -1ULL) ++ return RISCV_HWPROBE_MISALIGNED_UNKNOWN; ++ ++ return perf; ++} ++ ++static void hwprobe_one_pair(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ switch (pair->key) { ++ case RISCV_HWPROBE_KEY_MVENDORID: ++ case RISCV_HWPROBE_KEY_MARCHID: ++ case RISCV_HWPROBE_KEY_MIMPID: ++ hwprobe_arch_id(pair, cpus); ++ break; ++ /* ++ * The kernel already assumes that the base single-letter ISA ++ * extensions are supported on all harts, and only supports the ++ * IMA base, so just cheat a bit here and tell that to ++ * userspace. 
++ */ ++ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: ++ pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; ++ break; ++ ++ case RISCV_HWPROBE_KEY_IMA_EXT_0: ++ hwprobe_isa_ext0(pair, cpus); ++ break; ++ ++ case RISCV_HWPROBE_KEY_CPUPERF_0: ++ pair->value = hwprobe_misaligned(cpus); ++ break; ++ ++ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: ++ pair->value = 0; ++ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) ++ pair->value = riscv_cboz_block_size; ++ break; ++ case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS: ++ pair->value = user_max_virt_addr(); ++ break; ++ ++ case RISCV_HWPROBE_KEY_TIME_CSR_FREQ: ++ pair->value = riscv_timebase; ++ break; ++ ++ /* ++ * For forward compatibility, unknown keys don't fail the whole ++ * call, but get their element key set to -1 and value set to 0 ++ * indicating they're unrecognized. ++ */ ++ default: ++ pair->key = -1; ++ pair->value = 0; ++ break; ++ } ++} ++ ++static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, ++ size_t pair_count, size_t cpusetsize, ++ unsigned long __user *cpus_user, ++ unsigned int flags) ++{ ++ size_t out; ++ int ret; ++ cpumask_t cpus; ++ ++ /* Check the reserved flags. */ ++ if (flags != 0) ++ return -EINVAL; ++ ++ /* ++ * The interface supports taking in a CPU mask, and returns values that ++ * are consistent across that mask. Allow userspace to specify NULL and ++ * 0 as a shortcut to all online CPUs. ++ */ ++ cpumask_clear(&cpus); ++ if (!cpusetsize && !cpus_user) { ++ cpumask_copy(&cpus, cpu_online_mask); ++ } else { ++ if (cpusetsize > cpumask_size()) ++ cpusetsize = cpumask_size(); ++ ++ ret = copy_from_user(&cpus, cpus_user, cpusetsize); ++ if (ret) ++ return -EFAULT; ++ ++ /* ++ * Userspace must provide at least one online CPU, without that ++ * there's no way to define what is supported. ++ */ ++ cpumask_and(&cpus, &cpus, cpu_online_mask); ++ if (cpumask_empty(&cpus)) ++ return -EINVAL; ++ } ++ ++ for (out = 0; out < pair_count; out++, pairs++) { ++ struct riscv_hwprobe pair; ++ ++ if (get_user(pair.key, &pairs->key)) ++ return -EFAULT; ++ ++ pair.value = 0; ++ hwprobe_one_pair(&pair, &cpus); ++ ret = put_user(pair.key, &pairs->key); ++ if (ret == 0) ++ ret = put_user(pair.value, &pairs->value); ++ ++ if (ret) ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_MMU ++ ++static int __init init_hwprobe_vdso_data(void) ++{ ++ struct vdso_data *vd = __arch_get_k_vdso_data(); ++ struct arch_vdso_data *avd = &vd->arch_data; ++ u64 id_bitsmash = 0; ++ struct riscv_hwprobe pair; ++ int key; ++ ++ /* ++ * Initialize vDSO data with the answers for the "all CPUs" case, to ++ * save a syscall in the common case. ++ */ ++ for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { ++ pair.key = key; ++ hwprobe_one_pair(&pair, cpu_online_mask); ++ ++ WARN_ON_ONCE(pair.key < 0); ++ ++ avd->all_cpu_hwprobe_values[key] = pair.value; ++ /* ++ * Smash together the vendor, arch, and impl IDs to see if ++ * they're all 0 or any negative. ++ */ ++ if (key <= RISCV_HWPROBE_KEY_MIMPID) ++ id_bitsmash |= pair.value; ++ } ++ ++ /* ++ * If the arch, vendor, and implementation ID are all the same across ++ * all harts, then assume all CPUs are the same, and allow the vDSO to ++ * answer queries for arbitrary masks. However if all values are 0 (not ++ * populated) or any value returns -1 (varies across CPUs), then the ++ * vDSO should defer to the kernel for exotic cpu masks. 
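Since sys_hwprobe.c introduces a user-facing interface, a hedged userspace sketch of calling it directly may help (the SYSCALL_DEFINE5 entry point follows just below). It assumes the UAPI headers and syscall number exported by a kernel carrying this patch; recent glibc additionally ships a __riscv_hwprobe() wrapper:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>	/* struct riscv_hwprobe, keys and value bits */

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
		{ .key = RISCV_HWPROBE_KEY_CPUPERF_0 },
	};

	/* cpusetsize == 0 with cpus == NULL means "all online CPUs". */
	if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
		return 1;

	printf("vector:          %s\n",
	       pairs[0].value & RISCV_HWPROBE_IMA_V ? "yes" : "no");
	printf("fast misaligned: %s\n",
	       (pairs[1].value & RISCV_HWPROBE_MISALIGNED_MASK) ==
	       RISCV_HWPROBE_MISALIGNED_FAST ? "yes" : "no");
	return 0;
}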
++ */ ++ avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; ++ return 0; ++} ++ ++arch_initcall_sync(init_hwprobe_vdso_data); ++ ++#endif /* CONFIG_MMU */ ++ ++SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, ++ size_t, pair_count, size_t, cpusetsize, unsigned long __user *, ++ cpus, unsigned int, flags) ++{ ++ return do_riscv_hwprobe(pairs, pair_count, cpusetsize, ++ cpus, flags); ++} +diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c +index 473159b5f303..f1c1416a9f1e 100644 +--- a/arch/riscv/kernel/sys_riscv.c ++++ b/arch/riscv/kernel/sys_riscv.c +@@ -7,15 +7,7 @@ + + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include + #include +-#include + + static long riscv_sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, +@@ -77,265 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, + return 0; + } + +-/* +- * The hwprobe interface, for allowing userspace to probe to see which features +- * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more +- * details. +- */ +-static void hwprobe_arch_id(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- u64 id = -1ULL; +- bool first = true; +- int cpu; +- +- for_each_cpu(cpu, cpus) { +- u64 cpu_id; +- +- switch (pair->key) { +- case RISCV_HWPROBE_KEY_MVENDORID: +- cpu_id = riscv_cached_mvendorid(cpu); +- break; +- case RISCV_HWPROBE_KEY_MIMPID: +- cpu_id = riscv_cached_mimpid(cpu); +- break; +- case RISCV_HWPROBE_KEY_MARCHID: +- cpu_id = riscv_cached_marchid(cpu); +- break; +- } +- +- if (first) { +- id = cpu_id; +- first = false; +- } +- +- /* +- * If there's a mismatch for the given set, return -1 in the +- * value. +- */ +- if (id != cpu_id) { +- id = -1ULL; +- break; +- } +- } +- +- pair->value = id; +-} +- +-static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- int cpu; +- u64 missing = 0; +- +- pair->value = 0; +- if (has_fpu()) +- pair->value |= RISCV_HWPROBE_IMA_FD; +- +- if (riscv_isa_extension_available(NULL, c)) +- pair->value |= RISCV_HWPROBE_IMA_C; +- +- if (has_vector()) +- pair->value |= RISCV_HWPROBE_IMA_V; +- +- /* +- * Loop through and record extensions that 1) anyone has, and 2) anyone +- * doesn't have. +- */ +- for_each_cpu(cpu, cpus) { +- struct riscv_isainfo *isainfo = &hart_isa[cpu]; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBA)) +- pair->value |= RISCV_HWPROBE_EXT_ZBA; +- else +- missing |= RISCV_HWPROBE_EXT_ZBA; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBB)) +- pair->value |= RISCV_HWPROBE_EXT_ZBB; +- else +- missing |= RISCV_HWPROBE_EXT_ZBB; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBS)) +- pair->value |= RISCV_HWPROBE_EXT_ZBS; +- else +- missing |= RISCV_HWPROBE_EXT_ZBS; +- } +- +- /* Now turn off reporting features if any CPU is missing it. 
*/ +- pair->value &= ~missing; +-} +- +-static u64 hwprobe_misaligned(const struct cpumask *cpus) +-{ +- int cpu; +- u64 perf = -1ULL; +- +- for_each_cpu(cpu, cpus) { +- int this_perf = per_cpu(misaligned_access_speed, cpu); +- +- if (perf == -1ULL) +- perf = this_perf; +- +- if (perf != this_perf) { +- perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; +- break; +- } +- } +- +- if (perf == -1ULL) +- return RISCV_HWPROBE_MISALIGNED_UNKNOWN; +- +- return perf; +-} +- +-static void hwprobe_one_pair(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- switch (pair->key) { +- case RISCV_HWPROBE_KEY_MVENDORID: +- case RISCV_HWPROBE_KEY_MARCHID: +- case RISCV_HWPROBE_KEY_MIMPID: +- hwprobe_arch_id(pair, cpus); +- break; +- /* +- * The kernel already assumes that the base single-letter ISA +- * extensions are supported on all harts, and only supports the +- * IMA base, so just cheat a bit here and tell that to +- * userspace. +- */ +- case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: +- pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; +- break; +- +- case RISCV_HWPROBE_KEY_IMA_EXT_0: +- hwprobe_isa_ext0(pair, cpus); +- break; +- +- case RISCV_HWPROBE_KEY_CPUPERF_0: +- pair->value = hwprobe_misaligned(cpus); +- break; +- +- /* +- * For forward compatibility, unknown keys don't fail the whole +- * call, but get their element key set to -1 and value set to 0 +- * indicating they're unrecognized. +- */ +- default: +- pair->key = -1; +- pair->value = 0; +- break; +- } +-} +- +-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, +- size_t pair_count, size_t cpu_count, +- unsigned long __user *cpus_user, +- unsigned int flags) +-{ +- size_t out; +- int ret; +- cpumask_t cpus; +- +- /* Check the reserved flags. */ +- if (flags != 0) +- return -EINVAL; +- +- /* +- * The interface supports taking in a CPU mask, and returns values that +- * are consistent across that mask. Allow userspace to specify NULL and +- * 0 as a shortcut to all online CPUs. +- */ +- cpumask_clear(&cpus); +- if (!cpu_count && !cpus_user) { +- cpumask_copy(&cpus, cpu_online_mask); +- } else { +- if (cpu_count > cpumask_size()) +- cpu_count = cpumask_size(); +- +- ret = copy_from_user(&cpus, cpus_user, cpu_count); +- if (ret) +- return -EFAULT; +- +- /* +- * Userspace must provide at least one online CPU, without that +- * there's no way to define what is supported. +- */ +- cpumask_and(&cpus, &cpus, cpu_online_mask); +- if (cpumask_empty(&cpus)) +- return -EINVAL; +- } +- +- for (out = 0; out < pair_count; out++, pairs++) { +- struct riscv_hwprobe pair; +- +- if (get_user(pair.key, &pairs->key)) +- return -EFAULT; +- +- pair.value = 0; +- hwprobe_one_pair(&pair, &cpus); +- ret = put_user(pair.key, &pairs->key); +- if (ret == 0) +- ret = put_user(pair.value, &pairs->value); +- +- if (ret) +- return -EFAULT; +- } +- +- return 0; +-} +- +-#ifdef CONFIG_MMU +- +-static int __init init_hwprobe_vdso_data(void) +-{ +- struct vdso_data *vd = __arch_get_k_vdso_data(); +- struct arch_vdso_data *avd = &vd->arch_data; +- u64 id_bitsmash = 0; +- struct riscv_hwprobe pair; +- int key; +- +- /* +- * Initialize vDSO data with the answers for the "all CPUs" case, to +- * save a syscall in the common case. +- */ +- for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { +- pair.key = key; +- hwprobe_one_pair(&pair, cpu_online_mask); +- +- WARN_ON_ONCE(pair.key < 0); +- +- avd->all_cpu_hwprobe_values[key] = pair.value; +- /* +- * Smash together the vendor, arch, and impl IDs to see if +- * they're all 0 or any negative. 
+- */ +- if (key <= RISCV_HWPROBE_KEY_MIMPID) +- id_bitsmash |= pair.value; +- } +- +- /* +- * If the arch, vendor, and implementation ID are all the same across +- * all harts, then assume all CPUs are the same, and allow the vDSO to +- * answer queries for arbitrary masks. However if all values are 0 (not +- * populated) or any value returns -1 (varies across CPUs), then the +- * vDSO should defer to the kernel for exotic cpu masks. +- */ +- avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; +- return 0; +-} +- +-arch_initcall_sync(init_hwprobe_vdso_data); +- +-#endif /* CONFIG_MMU */ +- +-SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, +- size_t, pair_count, size_t, cpu_count, unsigned long __user *, +- cpus, unsigned int, flags) +-{ +- return do_riscv_hwprobe(pairs, pair_count, cpu_count, +- cpus, flags); +-} +- + /* Not defined using SYSCALL_DEFINE0 to avoid error injection */ + asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused) + { +diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c +index cadf725ef798..1e926e4b5881 100644 +--- a/arch/riscv/kernel/vdso/hwprobe.c ++++ b/arch/riscv/kernel/vdso/hwprobe.c +@@ -3,26 +3,22 @@ + * Copyright 2023 Rivos, Inc + */ + ++#include + #include + #include + #include + + extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, ++ size_t cpusetsize, unsigned long *cpus, + unsigned int flags); + +-/* Add a prototype to avoid -Wmissing-prototypes warning. */ +-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, +- unsigned int flags); +- +-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, +- unsigned int flags) ++static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) + { + const struct vdso_data *vd = __arch_get_vdso_data(); + const struct arch_vdso_data *avd = &vd->arch_data; +- bool all_cpus = !cpu_count && !cpus; ++ bool all_cpus = !cpusetsize && !cpus; + struct riscv_hwprobe *p = pairs; + struct riscv_hwprobe *end = pairs + pair_count; + +@@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + * masks. + */ + if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus)) +- return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); ++ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); + + /* This is something we can handle, fill out the pairs. 
*/ + while (p < end) { +@@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + + return 0; + } ++ ++static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) ++{ ++ const struct vdso_data *vd = __arch_get_vdso_data(); ++ const struct arch_vdso_data *avd = &vd->arch_data; ++ struct riscv_hwprobe *p = pairs; ++ struct riscv_hwprobe *end = pairs + pair_count; ++ unsigned char *c = (unsigned char *)cpus; ++ bool empty_cpus = true; ++ bool clear_all = false; ++ int i; ++ ++ if (!cpusetsize || !cpus) ++ return -EINVAL; ++ ++ for (i = 0; i < cpusetsize; i++) { ++ if (c[i]) { ++ empty_cpus = false; ++ break; ++ } ++ } ++ ++ if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus) ++ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); ++ ++ while (p < end) { ++ if (riscv_hwprobe_key_is_valid(p->key)) { ++ struct riscv_hwprobe t = { ++ .key = p->key, ++ .value = avd->all_cpu_hwprobe_values[p->key], ++ }; ++ ++ if (!riscv_hwprobe_pair_cmp(&t, p)) ++ clear_all = true; ++ } else { ++ clear_all = true; ++ p->key = -1; ++ p->value = 0; ++ } ++ p++; ++ } ++ ++ if (clear_all) { ++ for (i = 0; i < cpusetsize; i++) ++ c[i] = 0; ++ } ++ ++ return 0; ++} ++ ++/* Add a prototype to avoid -Wmissing-prototypes warning. */ ++int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags); ++ ++int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) ++{ ++ if (flags & RISCV_HWPROBE_WHICH_CPUS) ++ return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize, ++ cpus, flags); ++ ++ return riscv_vdso_get_values(pairs, pair_count, cpusetsize, ++ cpus, flags); ++} diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c -index 8d92fb6c522c..2c1f6c87d955 100644 +index 81886fc36ed6..a0d1841c4388 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -83,7 +83,8 @@ static bool insn_is_vector(u32 insn_buf) @@ -22263,6 +37929,673 @@ index 8d92fb6c522c..2c1f6c87d955 100644 datap = kzalloc(riscv_v_vsize, GFP_KERNEL); if (!datap) return -ENOMEM; +@@ -136,8 +137,11 @@ bool riscv_v_first_use_handler(struct pt_regs *regs) + u32 __user *epc = (u32 __user *)regs->epc; + u32 insn = (u32)regs->badaddr; + ++ if (!has_vector()) ++ return false; ++ + /* Do not handle if V is not supported, or disabled */ +- if (!(ELF_HWCAP & COMPAT_HWCAP_ISA_V)) ++ if (!riscv_v_vstate_ctrl_user_allowed()) + return false; + + /* If V has been enabled then it is not the first-use trap */ +diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c +new file mode 100644 +index 000000000000..aeb8839d2f8a +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions.c +@@ -0,0 +1,56 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright 2024 Rivos, Inc ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[] = { ++#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES ++ &riscv_isa_vendor_ext_list_andes, ++#endif ++}; ++ ++const size_t riscv_isa_vendor_ext_list_size = ARRAY_SIZE(riscv_isa_vendor_ext_list); ++ ++/** ++ * __riscv_isa_vendor_extension_available() - Check whether given vendor ++ * extension is available or not. 
++ * ++ * @cpu: check if extension is available on this cpu ++ * @vendor: vendor that the extension is a member of ++ * @bit: bit position of the desired extension ++ * Return: true or false ++ * ++ * NOTE: When cpu is -1, will check if extension is available on all cpus ++ */ ++bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit) ++{ ++ struct riscv_isavendorinfo *bmap; ++ struct riscv_isavendorinfo *cpu_bmap; ++ ++ switch (vendor) { ++ #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES ++ case ANDES_VENDOR_ID: ++ bmap = &riscv_isa_vendor_ext_list_andes.all_harts_isa_bitmap; ++ cpu_bmap = &riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap[cpu]; ++ break; ++ #endif ++ default: ++ return false; ++ } ++ ++ if (cpu != -1) ++ bmap = &cpu_bmap[cpu]; ++ ++ if (bit >= RISCV_ISA_VENDOR_EXT_MAX) ++ return false; ++ ++ return test_bit(bit, bmap->isa) ? true : false; ++} ++EXPORT_SYMBOL_GPL(__riscv_isa_vendor_extension_available); +diff --git a/arch/riscv/kernel/vendor_extensions/Makefile b/arch/riscv/kernel/vendor_extensions/Makefile +new file mode 100644 +index 000000000000..6a61aed944f1 +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions/Makefile +@@ -0,0 +1,3 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_ANDES) += andes.o +diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c +new file mode 100644 +index 000000000000..4d8dfc974f00 +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions/andes.c +@@ -0,0 +1,18 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++/* All Andes vendor extensions supported in Linux */ ++const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = { ++ __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU), ++}; ++ ++struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes = { ++ .ext_data_count = ARRAY_SIZE(riscv_isa_vendor_ext_andes), ++ .ext_data = riscv_isa_vendor_ext_andes, ++}; +diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c +index 74bb27440527..596209f1a6ff 100644 +--- a/arch/riscv/kvm/aia.c ++++ b/arch/riscv/kvm/aia.c +@@ -10,12 +10,12 @@ + #include + #include + #include ++#include + #include + #include + #include + #include +-#include +-#include ++#include + + struct aia_hgei_control { + raw_spinlock_t lock; +@@ -394,6 +394,8 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner, + { + int ret = -ENOENT; + unsigned long flags; ++ const struct imsic_global_config *gc; ++ const struct imsic_local_config *lc; + struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu); + + if (!kvm_riscv_aia_available() || !hgctrl) +@@ -409,11 +411,14 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner, + + raw_spin_unlock_irqrestore(&hgctrl->lock, flags); + +- /* TODO: To be updated later by AIA IMSIC HW guest file support */ +- if (hgei_va) +- *hgei_va = NULL; +- if (hgei_pa) +- *hgei_pa = 0; ++ gc = imsic_get_global_config(); ++ lc = (gc) ? 
per_cpu_ptr(gc->local, cpu) : NULL; ++ if (lc && ret > 0) { ++ if (hgei_va) ++ *hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ); ++ if (hgei_pa) ++ *hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ); ++ } + + return ret; + } +@@ -600,9 +605,11 @@ void kvm_riscv_aia_disable(void) + int kvm_riscv_aia_init(void) + { + int rc; ++ const struct imsic_global_config *gc; + + if (!riscv_isa_extension_available(NULL, SxAIA)) + return -ENODEV; ++ gc = imsic_get_global_config(); + + /* Figure-out number of bits in HGEIE */ + csr_write(CSR_HGEIE, -1UL); +@@ -614,17 +621,17 @@ int kvm_riscv_aia_init(void) + /* + * Number of usable HGEI lines should be minimum of per-HART + * IMSIC guest files and number of bits in HGEIE +- * +- * TODO: To be updated later by AIA IMSIC HW guest file support + */ +- kvm_riscv_aia_nr_hgei = 0; ++ if (gc) ++ kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei, ++ BIT(gc->guest_index_bits) - 1); ++ else ++ kvm_riscv_aia_nr_hgei = 0; + +- /* +- * Find number of guest MSI IDs +- * +- * TODO: To be updated later by AIA IMSIC HW guest file support +- */ ++ /* Find number of guest MSI IDs */ + kvm_riscv_aia_max_ids = IMSIC_MAX_ID; ++ if (gc && kvm_riscv_aia_nr_hgei) ++ kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1; + + /* Initialize guest external interrupt line management */ + rc = aia_hgei_init(); +diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c +index 9d5b04c971c4..f59d1c0c8c43 100644 +--- a/arch/riscv/kvm/aia_aplic.c ++++ b/arch/riscv/kvm/aia_aplic.c +@@ -7,12 +7,12 @@ + * Anup Patel + */ + ++#include + #include + #include + #include + #include + #include +-#include + + struct aplic_irq { + raw_spinlock_t lock; +diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c +index 5cd407c6a8e4..39cd26af5a69 100644 +--- a/arch/riscv/kvm/aia_device.c ++++ b/arch/riscv/kvm/aia_device.c +@@ -8,9 +8,9 @@ + */ + + #include ++#include + #include + #include +-#include + + static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) + { +diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c +index c1585444f856..a8085cd8215e 100644 +--- a/arch/riscv/kvm/aia_imsic.c ++++ b/arch/riscv/kvm/aia_imsic.c +@@ -9,13 +9,13 @@ + + #include + #include ++#include + #include + #include + #include + #include + #include + #include +-#include + + #define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64)) + +diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c +index 48ae0d4b3932..225a435d9c9a 100644 +--- a/arch/riscv/kvm/main.c ++++ b/arch/riscv/kvm/main.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + #include + + long kvm_arch_dev_ioctl(struct file *filp, +diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c +index 44bc324aeeb0..23c0e82b5103 100644 +--- a/arch/riscv/kvm/tlb.c ++++ b/arch/riscv/kvm/tlb.c +@@ -12,7 +12,7 @@ + #include + #include + #include +-#include ++#include + #include + + #define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL) +diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c +index 08ba48a395aa..030904d82b58 100644 +--- a/arch/riscv/kvm/vcpu_fp.c ++++ b/arch/riscv/kvm/vcpu_fp.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + + #ifdef CONFIG_FPU + void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c +index d520b25d8561..9e7e755163a9 100644 +--- a/arch/riscv/kvm/vcpu_onereg.c ++++ b/arch/riscv/kvm/vcpu_onereg.c +@@ -13,7 +13,7 @@ + #include + #include + #include 
+-#include ++#include + #include + #include + +diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c +index b430cbb69521..b339a2682f25 100644 +--- a/arch/riscv/kvm/vcpu_vector.c ++++ b/arch/riscv/kvm/vcpu_vector.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile +index 26cb2502ecf8..183bf2097d57 100644 +--- a/arch/riscv/lib/Makefile ++++ b/arch/riscv/lib/Makefile +@@ -9,5 +9,6 @@ lib-y += strncmp.o + lib-$(CONFIG_MMU) += uaccess.o + lib-$(CONFIG_64BIT) += tishift.o + lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o ++lib-$(CONFIG_RISCV_ISA_ZBC) += crc32.o + + obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o +diff --git a/arch/riscv/lib/crc32.c b/arch/riscv/lib/crc32.c +new file mode 100644 +index 000000000000..d7dc599af3ef +--- /dev/null ++++ b/arch/riscv/lib/crc32.c +@@ -0,0 +1,294 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Accelerated CRC32 implementation with Zbc extension. ++ * ++ * Copyright (C) 2024 Intel Corporation ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * Refer to https://www.corsix.org/content/barrett-reduction-polynomials for ++ * better understanding of how this math works. ++ * ++ * let "+" denotes polynomial add (XOR) ++ * let "-" denotes polynomial sub (XOR) ++ * let "*" denotes polynomial multiplication ++ * let "/" denotes polynomial floor division ++ * let "S" denotes source data, XLEN bit wide ++ * let "P" denotes CRC32 polynomial ++ * let "T" denotes 2^(XLEN+32) ++ * let "QT" denotes quotient of T/P, with the bit for 2^XLEN being implicit ++ * ++ * crc32(S, P) ++ * => S * (2^32) - S * (2^32) / P * P ++ * => lowest 32 bits of: S * (2^32) / P * P ++ * => lowest 32 bits of: S * (2^32) * (T / P) / T * P ++ * => lowest 32 bits of: S * (2^32) * quotient / T * P ++ * => lowest 32 bits of: S * quotient / 2^XLEN * P ++ * => lowest 32 bits of: (clmul_high_part(S, QT) + S) * P ++ * => clmul_low_part(clmul_high_part(S, QT) + S, P) ++ * ++ * In terms of below implementations, the BE case is more intuitive, since the ++ * higher order bit sits at more significant position. ++ */ ++ ++#if __riscv_xlen == 64 ++/* Slide by XLEN bits per iteration */ ++# define STEP_ORDER 3 ++ ++/* Each below polynomial quotient has an implicit bit for 2^XLEN */ ++ ++/* Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in LE format */ ++# define CRC32_POLY_QT_LE 0x5a72d812fb808b20 ++ ++/* Polynomial quotient of (2^(XLEN+32))/CRC32C_POLY, in LE format */ ++# define CRC32C_POLY_QT_LE 0xa434f61c6f5389f8 ++ ++/* Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in BE format, it should be ++ * the same as the bit-reversed version of CRC32_POLY_QT_LE ++ */ ++# define CRC32_POLY_QT_BE 0x04d101df481b4e5a ++ ++static inline u64 crc32_le_prep(u32 crc, unsigned long const *ptr) ++{ ++ return (u64)crc ^ (__force u64)__cpu_to_le64(*ptr); ++} ++ ++static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt) ++{ ++ u32 crc; ++ ++ /* We don't have a "clmulrh" insn, so use clmul + slli instead. 
*/ ++ asm volatile (".option push\n" ++ ".option arch,+zbc\n" ++ "clmul %0, %1, %2\n" ++ "slli %0, %0, 1\n" ++ "xor %0, %0, %1\n" ++ "clmulr %0, %0, %3\n" ++ "srli %0, %0, 32\n" ++ ".option pop\n" ++ : "=&r" (crc) ++ : "r" (s), ++ "r" (poly_qt), ++ "r" ((u64)poly << 32) ++ :); ++ return crc; ++} ++ ++static inline u64 crc32_be_prep(u32 crc, unsigned long const *ptr) ++{ ++ return ((u64)crc << 32) ^ (__force u64)__cpu_to_be64(*ptr); ++} ++ ++#elif __riscv_xlen == 32 ++# define STEP_ORDER 2 ++/* Each quotient should match the upper half of its analog in RV64 */ ++# define CRC32_POLY_QT_LE 0xfb808b20 ++# define CRC32C_POLY_QT_LE 0x6f5389f8 ++# define CRC32_POLY_QT_BE 0x04d101df ++ ++static inline u32 crc32_le_prep(u32 crc, unsigned long const *ptr) ++{ ++ return crc ^ (__force u32)__cpu_to_le32(*ptr); ++} ++ ++static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt) ++{ ++ u32 crc; ++ ++ /* We don't have a "clmulrh" insn, so use clmul + slli instead. */ ++ asm volatile (".option push\n" ++ ".option arch,+zbc\n" ++ "clmul %0, %1, %2\n" ++ "slli %0, %0, 1\n" ++ "xor %0, %0, %1\n" ++ "clmulr %0, %0, %3\n" ++ ".option pop\n" ++ : "=&r" (crc) ++ : "r" (s), ++ "r" (poly_qt), ++ "r" (poly) ++ :); ++ return crc; ++} ++ ++static inline u32 crc32_be_prep(u32 crc, unsigned long const *ptr) ++{ ++ return crc ^ (__force u32)__cpu_to_be32(*ptr); ++} ++ ++#else ++# error "Unexpected __riscv_xlen" ++#endif ++ ++static inline u32 crc32_be_zbc(unsigned long s) ++{ ++ u32 crc; ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbc\n" ++ "clmulh %0, %1, %2\n" ++ "xor %0, %0, %1\n" ++ "clmul %0, %0, %3\n" ++ ".option pop\n" ++ : "=&r" (crc) ++ : "r" (s), ++ "r" (CRC32_POLY_QT_BE), ++ "r" (CRC32_POLY_BE) ++ :); ++ return crc; ++} ++ ++#define STEP (1 << STEP_ORDER) ++#define OFFSET_MASK (STEP - 1) ++ ++typedef u32 (*fallback)(u32 crc, unsigned char const *p, size_t len); ++ ++static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p, ++ size_t len, u32 poly, ++ unsigned long poly_qt) ++{ ++ size_t bits = len * 8; ++ unsigned long s = 0; ++ u32 crc_low = 0; ++ ++ for (int i = 0; i < len; i++) ++ s = ((unsigned long)*p++ << (__riscv_xlen - 8)) | (s >> 8); ++ ++ s ^= (unsigned long)crc << (__riscv_xlen - bits); ++ if (__riscv_xlen == 32 || len < sizeof(u32)) ++ crc_low = crc >> bits; ++ ++ crc = crc32_le_zbc(s, poly, poly_qt); ++ crc ^= crc_low; ++ ++ return crc; ++} ++ ++static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, ++ size_t len, u32 poly, ++ unsigned long poly_qt, ++ fallback crc_fb) ++{ ++ size_t offset, head_len, tail_len; ++ unsigned long const *p_ul; ++ unsigned long s; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBC, 1) ++ : : : : legacy); ++ ++ /* Handle the unaligned head. */ ++ offset = (unsigned long)p & OFFSET_MASK; ++ if (offset && len) { ++ head_len = min(STEP - offset, len); ++ crc = crc32_le_unaligned(crc, p, head_len, poly, poly_qt); ++ p += head_len; ++ len -= head_len; ++ } ++ ++ tail_len = len & OFFSET_MASK; ++ len = len >> STEP_ORDER; ++ p_ul = (unsigned long const *)p; ++ ++ for (int i = 0; i < len; i++) { ++ s = crc32_le_prep(crc, p_ul); ++ crc = crc32_le_zbc(s, poly, poly_qt); ++ p_ul++; ++ } ++ ++ /* Handle the tail bytes. 
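When the Zbc alternative is not applied, the asm goto above jumps to the legacy label and the work is handed to crc32_le_base()/__crc32c_le_base()/crc32_be_base(), the generic C implementations from lib/crc32.c. Purely to illustrate the computation that the carry-less-multiply path accelerates (this sketch is not part of the patch), the reflected little-endian CRC32 can be written bit-serially; 0xEDB88320 is the same constant the kernel names CRC32_POLY_LE:

/* Bit-at-a-time CRC32 over the reflected polynomial, for illustration only. */
static u32 crc32_le_bitwise(u32 crc, const unsigned char *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return crc;
}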
*/ ++ p = (unsigned char const *)p_ul; ++ if (tail_len) ++ crc = crc32_le_unaligned(crc, p, tail_len, poly, poly_qt); ++ ++ return crc; ++ ++legacy: ++ return crc_fb(crc, p, len); ++} ++ ++u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) ++{ ++ return crc32_le_generic(crc, p, len, CRC32_POLY_LE, CRC32_POLY_QT_LE, ++ crc32_le_base); ++} ++ ++u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) ++{ ++ return crc32_le_generic(crc, p, len, CRC32C_POLY_LE, ++ CRC32C_POLY_QT_LE, __crc32c_le_base); ++} ++ ++static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p, ++ size_t len) ++{ ++ size_t bits = len * 8; ++ unsigned long s = 0; ++ u32 crc_low = 0; ++ ++ s = 0; ++ for (int i = 0; i < len; i++) ++ s = *p++ | (s << 8); ++ ++ if (__riscv_xlen == 32 || len < sizeof(u32)) { ++ s ^= crc >> (32 - bits); ++ crc_low = crc << bits; ++ } else { ++ s ^= (unsigned long)crc << (bits - 32); ++ } ++ ++ crc = crc32_be_zbc(s); ++ crc ^= crc_low; ++ ++ return crc; ++} ++ ++u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) ++{ ++ size_t offset, head_len, tail_len; ++ unsigned long const *p_ul; ++ unsigned long s; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBC, 1) ++ : : : : legacy); ++ ++ /* Handle the unaligned head. */ ++ offset = (unsigned long)p & OFFSET_MASK; ++ if (offset && len) { ++ head_len = min(STEP - offset, len); ++ crc = crc32_be_unaligned(crc, p, head_len); ++ p += head_len; ++ len -= head_len; ++ } ++ ++ tail_len = len & OFFSET_MASK; ++ len = len >> STEP_ORDER; ++ p_ul = (unsigned long const *)p; ++ ++ for (int i = 0; i < len; i++) { ++ s = crc32_be_prep(crc, p_ul); ++ crc = crc32_be_zbc(s); ++ p_ul++; ++ } ++ ++ /* Handle the tail bytes. */ ++ p = (unsigned char const *)p_ul; ++ if (tail_len) ++ crc = crc32_be_unaligned(crc, p, tail_len); ++ ++ return crc; ++ ++legacy: ++ return crc32_be_base(crc, p, len); ++} +diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c +index f1387272a551..55a34f2020a8 100644 +--- a/arch/riscv/mm/cacheflush.c ++++ b/arch/riscv/mm/cacheflush.c +@@ -3,7 +3,9 @@ + * Copyright (C) 2017 SiFive + */ + ++#include + #include ++#include + #include + + #ifdef CONFIG_SMP +@@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void) + unsigned long cbom_hartid, cboz_hartid; + u32 cbom_block_size = 0, cboz_block_size = 0; + struct device_node *node; ++ struct acpi_table_header *rhct; ++ acpi_status status; ++ ++ if (acpi_disabled) { ++ for_each_of_cpu_node(node) { ++ /* set block-size for cbom and/or cboz extension if available */ ++ cbo_get_block_size(node, "riscv,cbom-block-size", ++ &cbom_block_size, &cbom_hartid); ++ cbo_get_block_size(node, "riscv,cboz-block-size", ++ &cboz_block_size, &cboz_hartid); ++ } ++ } else { ++ status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct); ++ if (ACPI_FAILURE(status)) ++ return; + +- for_each_of_cpu_node(node) { +- /* set block-size for cbom and/or cboz extension if available */ +- cbo_get_block_size(node, "riscv,cbom-block-size", +- &cbom_block_size, &cbom_hartid); +- cbo_get_block_size(node, "riscv,cboz-block-size", +- &cboz_block_size, &cboz_hartid); ++ acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL); ++ acpi_put_table((struct acpi_table_header *)rhct); + } + + if (cbom_block_size) diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index a77342eb3489..32031a7d96d4 100644 --- a/arch/riscv/mm/dma-noncoherent.c @@ -22387,6 +38720,18 @@ index 324e8cd9b502..a9f4af9f7f3f 100644 for (i = 0; i < 
nr_ptes_in_range; ++i) { local_flush_tlb_page_asid(start, asid); start += stride; +diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig +index 7af20faf9f43..2224afa91cc8 100644 +--- a/arch/sw_64/Kconfig ++++ b/arch/sw_64/Kconfig +@@ -430,7 +430,6 @@ source "kernel/livepatch/Kconfig" + config NUMA + bool "NUMA Support" + depends on SMP && !FLATMEM +- select ACPI_NUMA if ACPI + select OF_NUMA + help + Say Y to compile the kernel to support NUMA (Non-Uniform Memory diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 551829884734..dcfaa3812306 100644 --- a/arch/x86/include/asm/hw_irq.h @@ -22414,6 +38759,1991 @@ index 8e1ef5345b7a..a67bb8f982bd 100644 paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); paravirt_tlb_remove_table(tlb, virt_to_page(pud)); } +diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig +index c4746869d67b..34dfe1430d75 100644 +--- a/drivers/acpi/Kconfig ++++ b/drivers/acpi/Kconfig +@@ -281,7 +281,7 @@ config ACPI_CPPC_LIB + + config ACPI_PROCESSOR + tristate "Processor" +- depends on X86 || IA64 || ARM64 || LOONGARCH ++ depends on X86 || IA64 || ARM64 || LOONGARCH || RISCV + select ACPI_PROCESSOR_IDLE + select ACPI_CPU_FREQ_PSS if X86 || IA64 || LOONGARCH + select THERMAL +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile +index eaa09bf52f17..d367e649714f 100644 +--- a/drivers/acpi/Makefile ++++ b/drivers/acpi/Makefile +@@ -37,7 +37,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o + # ACPI Bus and Device Drivers + # + acpi-y += bus.o glue.o +-acpi-y += scan.o ++acpi-y += scan.o mipi-disco-img.o + acpi-y += resource.o + acpi-y += acpi_processor.o + acpi-y += processor_core.o +diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c +index 6b1b31eabfdd..96d70d640a21 100644 +--- a/drivers/acpi/acpi_apd.c ++++ b/drivers/acpi/acpi_apd.c +@@ -40,8 +40,9 @@ struct apd_private_data { + const struct apd_device_desc *dev_desc; + }; + +-#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || \ +-defined(CONFIG_ARM64) || defined(CONFIG_SW64) ++#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64) || \ ++ defined(CONFIG_SW64) || defined(CONFIG_RISCV) ++ + #define APD_ADDR(desc) ((unsigned long)&desc) + + static int acpi_apd_setup(struct apd_private_data *pdata) +@@ -205,6 +206,18 @@ static int sw64_acpi_apd_setup(struct apd_private_data *pdata) + } + #endif /* CONFIG_SW64 */ + ++#ifdef CONFIG_RISCV ++static const struct apd_device_desc sophgo_i2c_desc = { ++ .setup = acpi_apd_setup, ++ .fixed_clk_rate = 100000000, ++}; ++ ++static const struct apd_device_desc sophgo_spi_desc = { ++ .setup = acpi_apd_setup, ++ .fixed_clk_rate = 250000000, ++}; ++#endif /* CONFIG_RISCV */ ++ + #endif + + /* +@@ -277,6 +290,10 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { + #ifdef CONFIG_SW64 + { "SUNW0005", APD_ADDR(sunway_i2c_desc) }, + { "SUNW0008", APD_ADDR(sunway_spi_desc) }, ++#endif ++#ifdef CONFIG_RISCV ++ { "SOPH0003", APD_ADDR(sophgo_i2c_desc) }, ++ { "SOPH0004", APD_ADDR(sophgo_spi_desc) }, + #endif + { } + }; +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c +index 98a2ab3b6844..1a418424d250 100644 +--- a/drivers/acpi/acpi_lpss.c ++++ b/drivers/acpi/acpi_lpss.c +@@ -579,25 +579,26 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid) + static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle) + { + struct acpi_handle_list dep_devices; +- acpi_status status; ++ bool ret = false; + int i; + + if (!acpi_has_method(adev->handle, "_DEP")) + return false; + +- status = 
acpi_evaluate_reference(adev->handle, "_DEP", NULL, +- &dep_devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(adev->handle, "_DEP", NULL, &dep_devices)) { + dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { +- if (dep_devices.handles[i] == handle) +- return true; ++ if (dep_devices.handles[i] == handle) { ++ ret = true; ++ break; ++ } + } + +- return false; ++ acpi_handle_list_free(&dep_devices); ++ return ret; + } + + static void acpi_lpss_link_consumer(struct device *dev1, +diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c +index 93d796531af3..52b2abf88689 100644 +--- a/drivers/acpi/arm64/dma.c ++++ b/drivers/acpi/arm64/dma.c +@@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev) + { + int ret; + u64 end, mask; +- u64 size = 0; + const struct bus_dma_region *map = NULL; + + /* +@@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev) + } + + if (dev->coherent_dma_mask) +- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); ++ end = dev->coherent_dma_mask; + else +- size = 1ULL << 32; ++ end = (1ULL << 32) - 1; + + ret = acpi_dma_get_range(dev, &map); + if (!ret && map) { +- const struct bus_dma_region *r = map; +- +- for (end = 0; r->size; r++) { +- if (r->dma_start + r->size - 1 > end) +- end = r->dma_start + r->size - 1; +- } +- +- size = end + 1; ++ end = dma_range_map_max(map); + dev->dma_range_map = map; + } + + if (ret == -ENODEV) +- ret = iort_dma_get_ranges(dev, &size); ++ ret = iort_dma_get_ranges(dev, &end); + if (!ret) { + /* + * Limit coherent and dma mask based on size retrieved from + * firmware. + */ +- end = size - 1; + mask = DMA_BIT_MASK(ilog2(end) + 1); + dev->bus_dma_limit = end; + dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c +index ebb52d2b22de..678431a862bc 100644 +--- a/drivers/acpi/arm64/iort.c ++++ b/drivers/acpi/arm64/iort.c +@@ -1466,7 +1466,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id) + { return -ENODEV; } + #endif + +-static int nc_dma_get_range(struct device *dev, u64 *size) ++static int nc_dma_get_range(struct device *dev, u64 *limit) + { + struct acpi_iort_node *node; + struct acpi_iort_named_component *ncomp; +@@ -1483,13 +1483,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size) + return -EINVAL; + } + +- *size = ncomp->memory_address_limit >= 64 ? U64_MAX : +- 1ULL<memory_address_limit; ++ *limit = ncomp->memory_address_limit >= 64 ? U64_MAX : ++ (1ULL << ncomp->memory_address_limit) - 1; + + return 0; + } + +-static int rc_dma_get_range(struct device *dev, u64 *size) ++static int rc_dma_get_range(struct device *dev, u64 *limit) + { + struct acpi_iort_node *node; + struct acpi_iort_root_complex *rc; +@@ -1507,8 +1507,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size) + return -EINVAL; + } + +- *size = rc->memory_address_limit >= 64 ? U64_MAX : +- 1ULL<memory_address_limit; ++ *limit = rc->memory_address_limit >= 64 ? U64_MAX : ++ (1ULL << rc->memory_address_limit) - 1; + + return 0; + } +@@ -1516,16 +1516,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size) + /** + * iort_dma_get_ranges() - Look up DMA addressing limit for the device + * @dev: device to lookup +- * @size: DMA range size result pointer ++ * @limit: DMA limit result pointer + * + * Return: 0 on success, an error otherwise. 
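A quick worked example of the new IORT semantics (both the old and the new code still special-case limits of 64 bits or more as U64_MAX, since the shift would overflow):

	memory_address_limit = 32  =>  *limit = (1ULL << 32) - 1 = 0xffffffff

That is, the helpers now hand back the highest addressable byte, which acpi_arch_dma_setup() above stores directly in dev->bus_dma_limit, where the old code returned the size 0x100000000 and left every caller to subtract one itself.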
+ */ +-int iort_dma_get_ranges(struct device *dev, u64 *size) ++int iort_dma_get_ranges(struct device *dev, u64 *limit) + { + if (dev_is_pci(dev)) +- return rc_dma_get_range(dev, size); ++ return rc_dma_get_range(dev, limit); + else +- return nc_dma_get_range(dev, size); ++ return nc_dma_get_range(dev, limit); + } + + static void __init acpi_iort_register_irq(int hwirq, const char *name, +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c +index e0800d1f8ff7..5ae0f1aa57ce 100644 +--- a/drivers/acpi/bus.c ++++ b/drivers/acpi/bus.c +@@ -1179,6 +1179,9 @@ static int __init acpi_bus_init_irq(void) + message = "SWPIC"; + break; + #endif ++ case ACPI_IRQ_MODEL_RINTC: ++ message = "RINTC"; ++ break; + default: + pr_info("Unknown interrupt routing model\n"); + return -ENODEV; +@@ -1435,6 +1438,7 @@ static int __init acpi_init(void) + acpi_hest_init(); + acpi_ghes_init(); + acpi_arm_init(); ++ acpi_riscv_init(); + acpi_scan_init(); + acpi_ec_init(); + acpi_debugfs_init(); +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 0592aebe0c39..510b2ee3c71c 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -274,4 +274,12 @@ void acpi_init_lpit(void); + static inline void acpi_init_lpit(void) { } + #endif + ++/*-------------------------------------------------------------------------- ++ ACPI _CRS CSI-2 and MIPI DisCo for Imaging ++ -------------------------------------------------------------------------- */ ++ ++void acpi_mipi_check_crs_csi2(acpi_handle handle); ++void acpi_mipi_scan_crs_csi2(void); ++void acpi_mipi_crs_csi2_cleanup(void); ++ + #endif /* _ACPI_INTERNAL_H_ */ +diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c +new file mode 100644 +index 000000000000..91281c8cb4f2 +--- /dev/null ++++ b/drivers/acpi/mipi-disco-img.c +@@ -0,0 +1,292 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * MIPI DisCo for Imaging support. ++ * ++ * Copyright (C) 2023 Intel Corporation ++ * ++ * Support MIPI DisCo for Imaging by parsing ACPI _CRS CSI-2 records defined in ++ * Section 6.4.3.8.2.4 "Camera Serial Interface (CSI-2) Connection Resource ++ * Descriptor" of ACPI 6.5. ++ * ++ * The implementation looks for the information in the ACPI namespace (CSI-2 ++ * resource descriptors in _CRS) and constructs software nodes compatible with ++ * Documentation/firmware-guide/acpi/dsd/graph.rst to represent the CSI-2 ++ * connection graph. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "internal.h" ++ ++static LIST_HEAD(acpi_mipi_crs_csi2_list); ++ ++static void acpi_mipi_data_tag(acpi_handle handle, void *context) ++{ ++} ++ ++/* Connection data extracted from one _CRS CSI-2 resource descriptor. */ ++struct crs_csi2_connection { ++ struct list_head entry; ++ struct acpi_resource_csi2_serialbus csi2_data; ++ acpi_handle remote_handle; ++ char remote_name[]; ++}; ++ ++/* Data extracted from _CRS CSI-2 resource descriptors for one device. 
*/ ++struct crs_csi2 { ++ struct list_head entry; ++ acpi_handle handle; ++ struct acpi_device_software_nodes *swnodes; ++ struct list_head connections; ++ u32 port_count; ++}; ++ ++struct csi2_resources_walk_data { ++ acpi_handle handle; ++ struct list_head connections; ++}; ++ ++static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context) ++{ ++ struct csi2_resources_walk_data *crwd = context; ++ struct acpi_resource_csi2_serialbus *csi2_res; ++ struct acpi_resource_source *csi2_res_src; ++ u16 csi2_res_src_length; ++ struct crs_csi2_connection *conn; ++ acpi_handle remote_handle; ++ ++ if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) ++ return AE_OK; ++ ++ csi2_res = &res->data.csi2_serial_bus; ++ ++ if (csi2_res->type != ACPI_RESOURCE_SERIAL_TYPE_CSI2) ++ return AE_OK; ++ ++ csi2_res_src = &csi2_res->resource_source; ++ if (ACPI_FAILURE(acpi_get_handle(NULL, csi2_res_src->string_ptr, ++ &remote_handle))) { ++ acpi_handle_debug(crwd->handle, ++ "unable to find resource source\n"); ++ return AE_OK; ++ } ++ csi2_res_src_length = csi2_res_src->string_length; ++ if (!csi2_res_src_length) { ++ acpi_handle_debug(crwd->handle, ++ "invalid resource source string length\n"); ++ return AE_OK; ++ } ++ ++ conn = kmalloc(struct_size(conn, remote_name, csi2_res_src_length + 1), ++ GFP_KERNEL); ++ if (!conn) ++ return AE_OK; ++ ++ conn->csi2_data = *csi2_res; ++ strscpy(conn->remote_name, csi2_res_src->string_ptr, csi2_res_src_length); ++ conn->csi2_data.resource_source.string_ptr = conn->remote_name; ++ conn->remote_handle = remote_handle; ++ ++ list_add(&conn->entry, &crwd->connections); ++ ++ return AE_OK; ++} ++ ++static struct crs_csi2 *acpi_mipi_add_crs_csi2(acpi_handle handle, ++ struct list_head *list) ++{ ++ struct crs_csi2 *csi2; ++ ++ csi2 = kzalloc(sizeof(*csi2), GFP_KERNEL); ++ if (!csi2) ++ return NULL; ++ ++ csi2->handle = handle; ++ INIT_LIST_HEAD(&csi2->connections); ++ csi2->port_count = 1; ++ ++ if (ACPI_FAILURE(acpi_attach_data(handle, acpi_mipi_data_tag, csi2))) { ++ kfree(csi2); ++ return NULL; ++ } ++ ++ list_add(&csi2->entry, list); ++ ++ return csi2; ++} ++ ++static struct crs_csi2 *acpi_mipi_get_crs_csi2(acpi_handle handle) ++{ ++ struct crs_csi2 *csi2; ++ ++ if (ACPI_FAILURE(acpi_get_data_full(handle, acpi_mipi_data_tag, ++ (void **)&csi2, NULL))) ++ return NULL; ++ ++ return csi2; ++} ++ ++static void csi_csr2_release_connections(struct list_head *list) ++{ ++ struct crs_csi2_connection *conn, *conn_tmp; ++ ++ list_for_each_entry_safe(conn, conn_tmp, list, entry) { ++ list_del(&conn->entry); ++ kfree(conn); ++ } ++} ++ ++static void acpi_mipi_del_crs_csi2(struct crs_csi2 *csi2) ++{ ++ list_del(&csi2->entry); ++ acpi_detach_data(csi2->handle, acpi_mipi_data_tag); ++ kfree(csi2->swnodes); ++ csi_csr2_release_connections(&csi2->connections); ++ kfree(csi2); ++} ++ ++/** ++ * acpi_mipi_check_crs_csi2 - Look for CSI-2 resources in _CRS ++ * @handle: Device object handle to evaluate _CRS for. ++ * ++ * Find all CSI-2 resource descriptors in the given device's _CRS ++ * and collect them into a list. ++ */ ++void acpi_mipi_check_crs_csi2(acpi_handle handle) ++{ ++ struct csi2_resources_walk_data crwd = { ++ .handle = handle, ++ .connections = LIST_HEAD_INIT(crwd.connections), ++ }; ++ struct crs_csi2 *csi2; ++ ++ /* ++ * Avoid allocating _CRS CSI-2 objects for devices without any CSI-2 ++ * resource descriptions in _CRS to reduce overhead. 
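acpi_mipi_add_crs_csi2() and acpi_mipi_get_crs_csi2() above use acpi_attach_data()/acpi_get_data_full() as a per-handle lookup keyed by the tag function. A minimal sketch of that pattern with a hypothetical payload type:

#include <linux/acpi.h>
#include <linux/slab.h>

struct example_tagged {
	int value;
};

static void example_tag(acpi_handle handle, void *context) { }

static int example_attach(acpi_handle handle, int value)
{
	struct example_tagged *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	t->value = value;
	if (ACPI_FAILURE(acpi_attach_data(handle, example_tag, t))) {
		kfree(t);
		return -ENODEV;
	}

	return 0;
}

static struct example_tagged *example_lookup(acpi_handle handle)
{
	struct example_tagged *t;

	if (ACPI_FAILURE(acpi_get_data_full(handle, example_tag, (void **)&t, NULL)))
		return NULL;

	return t;
}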
++ */ ++ acpi_walk_resources(handle, METHOD_NAME__CRS, parse_csi2_resource, &crwd); ++ if (list_empty(&crwd.connections)) ++ return; ++ ++ /* ++ * Create a _CRS CSI-2 entry to store the extracted connection ++ * information and add it to the global list. ++ */ ++ csi2 = acpi_mipi_add_crs_csi2(handle, &acpi_mipi_crs_csi2_list); ++ if (!csi2) { ++ csi_csr2_release_connections(&crwd.connections); ++ return; /* Nothing really can be done about this. */ ++ } ++ ++ list_replace(&crwd.connections, &csi2->connections); ++} ++ ++#define NO_CSI2_PORT (UINT_MAX - 1) ++ ++static void alloc_crs_csi2_swnodes(struct crs_csi2 *csi2) ++{ ++ size_t port_count = csi2->port_count; ++ struct acpi_device_software_nodes *swnodes; ++ size_t alloc_size; ++ unsigned int i; ++ ++ /* ++ * Allocate memory for ports, node pointers (number of nodes + ++ * 1 (guardian), nodes (root + number of ports * 2 (because for ++ * every port there is an endpoint)). ++ */ ++ if (check_mul_overflow(sizeof(*swnodes->ports) + ++ sizeof(*swnodes->nodes) * 2 + ++ sizeof(*swnodes->nodeptrs) * 2, ++ port_count, &alloc_size) || ++ check_add_overflow(sizeof(*swnodes) + ++ sizeof(*swnodes->nodes) + ++ sizeof(*swnodes->nodeptrs) * 2, ++ alloc_size, &alloc_size)) { ++ acpi_handle_info(csi2->handle, ++ "too many _CRS CSI-2 resource handles (%zu)", ++ port_count); ++ return; ++ } ++ ++ swnodes = kmalloc(alloc_size, GFP_KERNEL); ++ if (!swnodes) ++ return; ++ ++ swnodes->ports = (struct acpi_device_software_node_port *)(swnodes + 1); ++ swnodes->nodes = (struct software_node *)(swnodes->ports + port_count); ++ swnodes->nodeptrs = (const struct software_node **)(swnodes->nodes + 1 + ++ 2 * port_count); ++ swnodes->num_ports = port_count; ++ ++ for (i = 0; i < 2 * port_count + 1; i++) ++ swnodes->nodeptrs[i] = &swnodes->nodes[i]; ++ ++ swnodes->nodeptrs[i] = NULL; ++ ++ for (i = 0; i < port_count; i++) ++ swnodes->ports[i].port_nr = NO_CSI2_PORT; ++ ++ csi2->swnodes = swnodes; ++} ++ ++/** ++ * acpi_mipi_scan_crs_csi2 - Create ACPI _CRS CSI-2 software nodes ++ * ++ * Note that this function must be called before any struct acpi_device objects ++ * are bound to any ACPI drivers or scan handlers, so it cannot assume the ++ * existence of struct acpi_device objects for every device present in the ACPI ++ * namespace. ++ * ++ * acpi_scan_lock in scan.c must be held when calling this function. ++ */ ++void acpi_mipi_scan_crs_csi2(void) ++{ ++ struct crs_csi2 *csi2; ++ LIST_HEAD(aux_list); ++ ++ /* Count references to each ACPI handle in the CSI-2 connection graph. */ ++ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) { ++ struct crs_csi2_connection *conn; ++ ++ list_for_each_entry(conn, &csi2->connections, entry) { ++ struct crs_csi2 *remote_csi2; ++ ++ csi2->port_count++; ++ ++ remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle); ++ if (remote_csi2) { ++ remote_csi2->port_count++; ++ continue; ++ } ++ /* ++ * The remote endpoint has no _CRS CSI-2 list entry yet, ++ * so create one for it and add it to the list. ++ */ ++ acpi_mipi_add_crs_csi2(conn->remote_handle, &aux_list); ++ } ++ } ++ list_splice(&aux_list, &acpi_mipi_crs_csi2_list); ++ ++ /* Allocate software nodes for representing the CSI-2 information. 
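The allocation in alloc_crs_csi2_swnodes() above folds the fixed and per-port parts into one overflow-checked size. A reduced sketch of the same check_mul_overflow()/check_add_overflow() pattern (names are illustrative, not from the patch):

#include <linux/overflow.h>

/* *size = fixed + per_item * count, or false if either step overflows. */
static bool example_checked_alloc_size(size_t fixed, size_t per_item,
				       size_t count, size_t *size)
{
	size_t body;

	if (check_mul_overflow(per_item, count, &body) ||
	    check_add_overflow(fixed, body, size))
		return false;

	return true;
}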
*/ ++ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) ++ alloc_crs_csi2_swnodes(csi2); ++} ++ ++/** ++ * acpi_mipi_crs_csi2_cleanup - Free _CRS CSI-2 temporary data ++ */ ++void acpi_mipi_crs_csi2_cleanup(void) ++{ ++ struct crs_csi2 *csi2, *csi2_tmp; ++ ++ list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry) ++ acpi_mipi_del_crs_csi2(csi2); ++} +diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig +index 67d1f40bfa9f..f33194d1e43f 100644 +--- a/drivers/acpi/numa/Kconfig ++++ b/drivers/acpi/numa/Kconfig +@@ -1,9 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + config ACPI_NUMA +- bool "NUMA support" +- depends on NUMA +- depends on (X86 || IA64 || ARM64 || LOONGARCH || SW64) +- default y if IA64 || ARM64 ++ def_bool NUMA && !X86 + + config ACPI_HMAT + bool "ACPI Heterogeneous Memory Attribute Table Support" +diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c +index aad85ccae2e0..e146e1b46806 100644 +--- a/drivers/acpi/numa/srat.c ++++ b/drivers/acpi/numa/srat.c +@@ -165,6 +165,19 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) + } + } + break; ++ ++ case ACPI_SRAT_TYPE_RINTC_AFFINITY: ++ { ++ struct acpi_srat_rintc_affinity *p = ++ (struct acpi_srat_rintc_affinity *)header; ++ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n", ++ p->acpi_processor_uid, ++ p->proximity_domain, ++ (p->flags & ACPI_SRAT_RINTC_ENABLED) ? ++ "enabled" : "disabled"); ++ } ++ break; ++ + default: + pr_warn("Found unsupported SRAT entry (type = 0x%x)\n", + header->type); +@@ -206,7 +219,7 @@ int __init srat_disabled(void) + return acpi_numa < 0; + } + +-#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) ++#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) || defined(CONFIG_RISCV) + /* + * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for + * I/O localities since SRAT does not list them. 
I/O localities are +@@ -466,6 +479,21 @@ acpi_parse_memory_affinity(union acpi_subtable_headers * header, + return 0; + } + ++static int __init ++acpi_parse_rintc_affinity(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_srat_rintc_affinity *rintc_affinity; ++ ++ rintc_affinity = (struct acpi_srat_rintc_affinity *)header; ++ acpi_table_print_srat_entry(&header->common); ++ ++ /* let architecture-dependent part to do it */ ++ acpi_numa_rintc_affinity_init(rintc_affinity); ++ ++ return 0; ++} ++ + static int __init acpi_parse_srat(struct acpi_table_header *table) + { + struct acpi_table_srat *srat = (struct acpi_table_srat *)table; +@@ -501,7 +529,7 @@ int __init acpi_numa_init(void) + + /* SRAT: System Resource Affinity Table */ + if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { +- struct acpi_subtable_proc srat_proc[4]; ++ struct acpi_subtable_proc srat_proc[5]; + + memset(srat_proc, 0, sizeof(srat_proc)); + srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY; +@@ -512,6 +540,8 @@ int __init acpi_numa_init(void) + srat_proc[2].handler = acpi_parse_gicc_affinity; + srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY; + srat_proc[3].handler = acpi_parse_gi_affinity; ++ srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY; ++ srat_proc[4].handler = acpi_parse_rintc_affinity; + + acpi_table_parse_entries_array(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), +diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c +index aa1038b8aec4..b727db968f33 100644 +--- a/drivers/acpi/pci_link.c ++++ b/drivers/acpi/pci_link.c +@@ -748,6 +748,8 @@ static int acpi_pci_link_add(struct acpi_device *device, + if (result) + kfree(link); + ++ acpi_dev_clear_dependencies(device); ++ + return result < 0 ? result : 1; + } + +diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c +index f5cb96ff8768..82f049627306 100644 +--- a/drivers/acpi/pci_mcfg.c ++++ b/drivers/acpi/pci_mcfg.c +@@ -225,6 +225,23 @@ static struct mcfg_fixup mcfg_quirks[] = { + SW64_ECAM_QUIRK("SUNWAY ", 1, 0x06, &sunway_pci_ecam_ops), + SW64_ECAM_QUIRK("SUNWAY ", 1, 0x07, &sunway_pci_ecam_ops), + #endif /* SW64 */ ++ ++#ifdef CONFIG_RISCV ++#define RISCV_ECAM_MCFG(table_id, seg) \ ++ { "SOPHGO", table_id, 1, seg, MCFG_BUS_ANY, &sophgo_pci_ecam_ops } ++ ++ RISCV_ECAM_MCFG("2044 ", 0), ++ RISCV_ECAM_MCFG("2044 ", 1), ++ RISCV_ECAM_MCFG("2044 ", 2), ++ RISCV_ECAM_MCFG("2044 ", 3), ++ RISCV_ECAM_MCFG("2044 ", 4), ++ RISCV_ECAM_MCFG("2044 ", 5), ++ RISCV_ECAM_MCFG("2044 ", 6), ++ RISCV_ECAM_MCFG("2044 ", 7), ++ RISCV_ECAM_MCFG("2044 ", 8), ++ RISCV_ECAM_MCFG("2044 ", 9), ++#endif /* RISCV */ ++ + }; + + static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; +diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile +index 8b3b126e0b94..a96fdf1e2cb8 100644 +--- a/drivers/acpi/riscv/Makefile ++++ b/drivers/acpi/riscv/Makefile +@@ -1,2 +1,4 @@ + # SPDX-License-Identifier: GPL-2.0-only +-obj-y += rhct.o ++obj-y += rhct.o init.o irq.o ++obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o ++obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o +diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c +new file mode 100644 +index 000000000000..4cdff387deff +--- /dev/null ++++ b/drivers/acpi/riscv/cppc.c +@@ -0,0 +1,157 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Implement CPPC FFH helper routines for RISC-V. ++ * ++ * Copyright (C) 2024 Ventana Micro Systems Inc. 
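The CPPC helpers in this new file route register accesses either through the SBI CPPC extension or through a CSR read, selected by the FFH encoding of the _CPC register address (see the FFH_CPPC_* masks a few lines below). A worked example with a made-up address: 0x2000000000000C01 decodes to type 0x2 (CSR access) and CSR number 0xC01, i.e. the time CSR. A small sketch of the decode, assuming the same bit layout:

#include <linux/bits.h>
#include <linux/types.h>

/* Access type lives in bits [63:60]; the SBI register or CSR number in the low bits. */
static inline unsigned int example_ffh_type(u64 address)
{
	return (address & GENMASK_ULL(63, 60)) >> 60;
}

static inline u32 example_ffh_reg(u64 address)
{
	return address & GENMASK(31, 0);
}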
++ */ ++ ++#include ++#include ++#include ++ ++#define SBI_EXT_CPPC 0x43505043 ++ ++/* CPPC interfaces defined in SBI spec */ ++#define SBI_CPPC_PROBE 0x0 ++#define SBI_CPPC_READ 0x1 ++#define SBI_CPPC_READ_HI 0x2 ++#define SBI_CPPC_WRITE 0x3 ++ ++/* RISC-V FFH definitions from RISC-V FFH spec */ ++#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60) ++#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0)) ++#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0)) ++ ++#define FFH_CPPC_SBI 0x1 ++#define FFH_CPPC_CSR 0x2 ++ ++struct sbi_cppc_data { ++ u64 val; ++ u32 reg; ++ struct sbiret ret; ++}; ++ ++static bool cppc_ext_present; ++ ++static int __init sbi_cppc_init(void) ++{ ++ if (sbi_spec_version >= sbi_mk_version(2, 0) && ++ sbi_probe_extension(SBI_EXT_CPPC) > 0) { ++ pr_info("SBI CPPC extension detected\n"); ++ cppc_ext_present = true; ++ } else { ++ pr_info("SBI CPPC extension NOT detected!!\n"); ++ cppc_ext_present = false; ++ } ++ ++ return 0; ++} ++device_initcall(sbi_cppc_init); ++ ++static void sbi_cppc_read(void *read_data) ++{ ++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; ++ ++ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ, ++ data->reg, 0, 0, 0, 0, 0); ++} ++ ++static void sbi_cppc_write(void *write_data) ++{ ++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; ++ ++ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE, ++ data->reg, data->val, 0, 0, 0, 0); ++} ++ ++static void cppc_ffh_csr_read(void *read_data) ++{ ++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; ++ ++ switch (data->reg) { ++ /* Support only TIME CSR for now */ ++ case CSR_TIME: ++ data->ret.value = csr_read(CSR_TIME); ++ data->ret.error = 0; ++ break; ++ default: ++ data->ret.error = -EINVAL; ++ break; ++ } ++} ++ ++static void cppc_ffh_csr_write(void *write_data) ++{ ++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; ++ ++ data->ret.error = -EINVAL; ++} ++ ++/* ++ * Refer to drivers/acpi/cppc_acpi.c for the description of the functions ++ * below. ++ */ ++bool cpc_ffh_supported(void) ++{ ++ return true; ++} ++ ++int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) ++{ ++ struct sbi_cppc_data data; ++ ++ if (WARN_ON_ONCE(irqs_disabled())) ++ return -EPERM; ++ ++ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { ++ if (!cppc_ext_present) ++ return -EINVAL; ++ ++ data.reg = FFH_CPPC_SBI_REG(reg->address); ++ ++ smp_call_function_single(cpu, sbi_cppc_read, &data, 1); ++ ++ *val = data.ret.value; ++ ++ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; ++ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { ++ data.reg = FFH_CPPC_CSR_NUM(reg->address); ++ ++ smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1); ++ ++ *val = data.ret.value; ++ ++ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; ++ } ++ ++ return -EINVAL; ++} ++ ++int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val) ++{ ++ struct sbi_cppc_data data; ++ ++ if (WARN_ON_ONCE(irqs_disabled())) ++ return -EPERM; ++ ++ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { ++ if (!cppc_ext_present) ++ return -EINVAL; ++ ++ data.reg = FFH_CPPC_SBI_REG(reg->address); ++ data.val = val; ++ ++ smp_call_function_single(cpu, sbi_cppc_write, &data, 1); ++ ++ return (data.ret.error) ? 
sbi_err_map_linux_errno(data.ret.error) : 0; ++ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { ++ data.reg = FFH_CPPC_CSR_NUM(reg->address); ++ data.val = val; ++ ++ smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1); ++ ++ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; ++ } ++ ++ return -EINVAL; ++} +diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c +new file mode 100644 +index 000000000000..624f9bbdb58c +--- /dev/null ++++ b/drivers/acpi/riscv/cpuidle.c +@@ -0,0 +1,81 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2024, Ventana Micro Systems Inc ++ * Author: Sunil V L ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60) ++#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32) ++ ++#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60) ++ ++static int acpi_cpu_init_idle(unsigned int cpu) ++{ ++ int i; ++ struct acpi_lpi_state *lpi; ++ struct acpi_processor *pr = per_cpu(processors, cpu); ++ ++ if (unlikely(!pr || !pr->flags.has_lpi)) ++ return -EINVAL; ++ ++ if (!riscv_sbi_hsm_is_supported()) ++ return -ENODEV; ++ ++ if (pr->power.count <= 1) ++ return -ENODEV; ++ ++ for (i = 1; i < pr->power.count; i++) { ++ u32 state; ++ ++ lpi = &pr->power.lpi_states[i]; ++ ++ /* ++ * Validate Entry Method as per FFH spec. ++ * bits[63:60] should be 0x1 ++ * bits[59:32] should be 0x0 ++ * bits[31:0] represent a SBI power_state ++ */ ++ if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) || ++ (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) { ++ pr_warn("Invalid LPI entry method %#llx\n", lpi->address); ++ return -EINVAL; ++ } ++ ++ state = lpi->address; ++ if (!riscv_sbi_suspend_state_is_valid(state)) { ++ pr_warn("Invalid SBI power state %#x\n", state); ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++int acpi_processor_ffh_lpi_probe(unsigned int cpu) ++{ ++ return acpi_cpu_init_idle(cpu); ++} ++ ++int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) ++{ ++ u32 state = lpi->address; ++ ++ if (state & SBI_HSM_SUSP_NON_RET_BIT) ++ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, ++ lpi->index, ++ state); ++ else ++ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, ++ lpi->index, ++ state); ++} +diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c +new file mode 100644 +index 000000000000..5ef97905a727 +--- /dev/null ++++ b/drivers/acpi/riscv/init.c +@@ -0,0 +1,13 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2023-2024, Ventana Micro Systems Inc ++ * Author: Sunil V L ++ */ ++ ++#include ++#include "init.h" ++ ++void __init acpi_riscv_init(void) ++{ ++ riscv_acpi_init_gsi_mapping(); ++} +diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h +new file mode 100644 +index 000000000000..0b9a07e4031f +--- /dev/null ++++ b/drivers/acpi/riscv/init.h +@@ -0,0 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++#include ++ ++void __init riscv_acpi_init_gsi_mapping(void); +diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c +new file mode 100644 +index 000000000000..cced960c2aef +--- /dev/null ++++ b/drivers/acpi/riscv/irq.c +@@ -0,0 +1,335 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2023-2024, Ventana Micro Systems Inc ++ * Author: Sunil V L ++ */ ++ ++#include ++#include ++#include ++ ++#include "init.h" ++ ++struct riscv_ext_intc_list { ++ acpi_handle handle; ++ u32 gsi_base; ++ u32 
nr_irqs; ++ u32 nr_idcs; ++ u32 id; ++ u32 type; ++ struct list_head list; ++}; ++ ++struct acpi_irq_dep_ctx { ++ int rc; ++ unsigned int index; ++ acpi_handle handle; ++}; ++ ++LIST_HEAD(ext_intc_list); ++ ++static int irqchip_cmp_func(const void *in0, const void *in1) ++{ ++ struct acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0; ++ struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1; ++ ++ return (elem0->type > elem1->type) - (elem0->type < elem1->type); ++} ++ ++/* ++ * On RISC-V, RINTC structures in MADT should be probed before any other ++ * interrupt controller structures and IMSIC before APLIC. The interrupt ++ * controller subtypes in MADT of ACPI spec for RISC-V are defined in ++ * the incremental order like RINTC(24)->IMSIC(25)->APLIC(26)->PLIC(27). ++ * Hence, simply sorting the subtypes in incremental order will ++ * establish the required order. ++ */ ++void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) ++{ ++ struct acpi_probe_entry *ape = ap_head; ++ ++ if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) ++ return; ++ sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL); ++} ++ ++static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i, *tmp; ++ ++ list_for_each_safe(i, tmp, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi_base == ext_intc_element->gsi_base) { ++ ext_intc_element->handle = handle; ++ return AE_OK; ++ } ++ } ++ ++ return AE_NOT_FOUND; ++} ++ ++int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) { ++ *gsi_base = ext_intc_element->gsi_base; ++ *id = ext_intc_element->id; ++ *nr_irqs = ext_intc_element->nr_irqs; ++ if (nr_idcs) ++ *nr_idcs = ext_intc_element->nr_idcs; ++ ++ return 0; ++ } ++ } ++ ++ return -ENODEV; ++} ++ ++struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct acpi_device *adev; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi >= ext_intc_element->gsi_base && ++ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) { ++ adev = acpi_fetch_acpi_dev(ext_intc_element->handle); ++ if (!adev) ++ return NULL; ++ ++ return acpi_fwnode_handle(adev); ++ } ++ } ++ ++ return NULL; ++} ++ ++static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs, ++ u32 id, u32 type) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ ++ ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL); ++ if (!ext_intc_element) ++ return -ENOMEM; ++ ++ ext_intc_element->gsi_base = gsi_base; ++ ext_intc_element->nr_irqs = nr_irqs; ++ ext_intc_element->nr_idcs = nr_idcs; ++ ext_intc_element->id = id; ++ list_add_tail(&ext_intc_element->list, &ext_intc_list); ++ return 0; ++} ++ ++static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level, ++ void *context, void **return_value) ++{ ++ acpi_status status; ++ u64 gbase; ++ ++ if (!acpi_has_method(handle, "_GSB")) { ++ acpi_handle_err(handle, "_GSB method not found\n"); ++ return AE_ERROR; ++ } 
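riscv_acpi_create_gsi_map() (continuing below) pairs each PLIC/APLIC namespace device with the MADT entry registered above by evaluating its _GSB object, which yields the controller's GSI base as an integer. Once the pairing exists, a hypothetical caller can resolve any GSI in a registered range back to its controller fwnode; a minimal usage sketch (not from the patch):

#include <linux/acpi.h>

/* example_gsi is a made-up value; it must fall inside one of the
 * [gsi_base, gsi_base + nr_irqs) ranges collected from the MADT. */
static struct fwnode_handle *example_gsi_to_fwnode(u32 example_gsi)
{
	return riscv_acpi_get_gsi_domain_id(example_gsi);
}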
++ ++ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to evaluate _GSB method\n"); ++ return status; ++ } ++ ++ status = riscv_acpi_update_gsi_handle((u32)gbase, handle); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); ++ return status; ++ } ++ ++ return AE_OK; ++} ++ ++static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header; ++ ++ return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs, ++ aplic->id, ACPI_RISCV_IRQCHIP_APLIC); ++} ++ ++static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header; ++ ++ return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0, ++ plic->id, ACPI_RISCV_IRQCHIP_PLIC); ++} ++ ++void __init riscv_acpi_init_gsi_mapping(void) ++{ ++ /* There can be either PLIC or APLIC */ ++ if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) { ++ acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL); ++ return; ++ } ++ ++ if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0) ++ acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL); ++} ++ ++static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi >= ext_intc_element->gsi_base && ++ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) ++ return ext_intc_element->handle; ++ } ++ ++ return NULL; ++} ++ ++static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context) ++{ ++ struct acpi_irq_dep_ctx *ctx = context; ++ struct acpi_resource_irq *irq; ++ struct acpi_resource_extended_irq *eirq; ++ ++ switch (ares->type) { ++ case ACPI_RESOURCE_TYPE_IRQ: ++ irq = &ares->data.irq; ++ if (ctx->index >= irq->interrupt_count) { ++ ctx->index -= irq->interrupt_count; ++ return AE_OK; ++ } ++ ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]); ++ return AE_CTRL_TERMINATE; ++ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ++ eirq = &ares->data.extended_irq; ++ if (eirq->producer_consumer == ACPI_PRODUCER) ++ return AE_OK; ++ ++ if (ctx->index >= eirq->interrupt_count) { ++ ctx->index -= eirq->interrupt_count; ++ return AE_OK; ++ } ++ ++ /* Support GSIs only */ ++ if (eirq->resource_source.string_length) ++ return AE_OK; ++ ++ ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]); ++ return AE_CTRL_TERMINATE; ++ } ++ ++ return AE_OK; ++} ++ ++static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle) ++{ ++ struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL}; ++ ++ if (!gsi_handle) ++ return 0; ++ ++ acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx); ++ *gsi_handle = ctx.handle; ++ if (*gsi_handle) ++ return 1; ++ ++ return 0; ++} ++ ++static u32 riscv_acpi_add_prt_dep(acpi_handle handle) ++{ ++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; ++ struct acpi_pci_routing_table *entry; ++ struct acpi_handle_list dep_devices; ++ acpi_handle gsi_handle; ++ acpi_handle link_handle; ++ acpi_status status; ++ u32 
count = 0; ++ ++ status = acpi_get_irq_routing_table(handle, &buffer); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to get IRQ routing table\n"); ++ kfree(buffer.pointer); ++ return 0; ++ } ++ ++ entry = buffer.pointer; ++ while (entry && (entry->length > 0)) { ++ if (entry->source[0]) { ++ acpi_get_handle(handle, entry->source, &link_handle); ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = link_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } else { ++ gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = gsi_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } ++ ++ entry = (struct acpi_pci_routing_table *) ++ ((unsigned long)entry + entry->length); ++ } ++ ++ kfree(buffer.pointer); ++ return count; ++} ++ ++static u32 riscv_acpi_add_irq_dep(acpi_handle handle) ++{ ++ struct acpi_handle_list dep_devices; ++ acpi_handle gsi_handle; ++ u32 count = 0; ++ int i; ++ ++ for (i = 0; ++ riscv_acpi_irq_get_dep(handle, i, &gsi_handle); ++ i++) { ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = gsi_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } ++ ++ return count; ++} ++ ++u32 arch_acpi_add_auto_dep(acpi_handle handle) ++{ ++ if (acpi_has_method(handle, "_PRT")) ++ return riscv_acpi_add_prt_dep(handle); ++ ++ return riscv_acpi_add_irq_dep(handle); ++} +diff --git a/drivers/acpi/riscv/rhct.c b/drivers/acpi/riscv/rhct.c +index b280b3e9c7d9..caa2c16e1697 100644 +--- a/drivers/acpi/riscv/rhct.c ++++ b/drivers/acpi/riscv/rhct.c +@@ -8,8 +8,9 @@ + #define pr_fmt(fmt) "ACPI: RHCT: " fmt + + #include ++#include + +-static struct acpi_table_header *acpi_get_rhct(void) ++static struct acpi_table_rhct *acpi_get_rhct(void) + { + static struct acpi_table_header *rhct; + acpi_status status; +@@ -26,7 +27,7 @@ static struct acpi_table_header *acpi_get_rhct(void) + } + } + +- return rhct; ++ return (struct acpi_table_rhct *)rhct; + } + + /* +@@ -48,7 +49,7 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const + BUG_ON(acpi_disabled); + + if (!table) { +- rhct = (struct acpi_table_rhct *)acpi_get_rhct(); ++ rhct = acpi_get_rhct(); + if (!rhct) + return -ENOENT; + } else { +@@ -81,3 +82,89 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const + + return -1; + } ++ ++static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct, ++ struct acpi_rhct_hart_info *hart_info, ++ u32 *cbom_size, u32 *cboz_size, u32 *cbop_size) ++{ ++ u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info); ++ u32 size_hdr = sizeof(struct acpi_rhct_node_header); ++ struct acpi_rhct_node_header *ref_node; ++ struct acpi_rhct_cmo_node *cmo_node; ++ u32 *hart_info_node_offset; ++ ++ hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo); ++ for (int i = 0; i < hart_info->num_offsets; i++) { ++ ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header, ++ rhct, 
hart_info_node_offset[i]); ++ if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) { ++ cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node, ++ ref_node, size_hdr); ++ if (cbom_size && cmo_node->cbom_size <= 30) { ++ if (!*cbom_size) ++ *cbom_size = BIT(cmo_node->cbom_size); ++ else if (*cbom_size != BIT(cmo_node->cbom_size)) ++ pr_warn("CBOM size is not the same across harts\n"); ++ } ++ ++ if (cboz_size && cmo_node->cboz_size <= 30) { ++ if (!*cboz_size) ++ *cboz_size = BIT(cmo_node->cboz_size); ++ else if (*cboz_size != BIT(cmo_node->cboz_size)) ++ pr_warn("CBOZ size is not the same across harts\n"); ++ } ++ ++ if (cbop_size && cmo_node->cbop_size <= 30) { ++ if (!*cbop_size) ++ *cbop_size = BIT(cmo_node->cbop_size); ++ else if (*cbop_size != BIT(cmo_node->cbop_size)) ++ pr_warn("CBOP size is not the same across harts\n"); ++ } ++ } ++ } ++} ++ ++/* ++ * During early boot, the caller should call acpi_get_table() and pass its pointer to ++ * these functions (and free up later). At run time, since this table can be used ++ * multiple times, pass NULL so that the table remains in memory. ++ */ ++void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, ++ u32 *cboz_size, u32 *cbop_size) ++{ ++ u32 size_hdr = sizeof(struct acpi_rhct_node_header); ++ struct acpi_rhct_node_header *node, *end; ++ struct acpi_rhct_hart_info *hart_info; ++ struct acpi_table_rhct *rhct; ++ ++ if (acpi_disabled) ++ return; ++ ++ if (table) { ++ rhct = (struct acpi_table_rhct *)table; ++ } else { ++ rhct = acpi_get_rhct(); ++ if (!rhct) ++ return; ++ } ++ ++ if (cbom_size) ++ *cbom_size = 0; ++ ++ if (cboz_size) ++ *cboz_size = 0; ++ ++ if (cbop_size) ++ *cbop_size = 0; ++ ++ end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length); ++ for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset); ++ node < end; ++ node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) { ++ if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) { ++ hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr); ++ acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size, ++ cboz_size, cbop_size); ++ } ++ } ++} +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 9a40052d31f3..84dcd3b5ce83 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -870,6 +870,9 @@ static const char * const acpi_honor_dep_ids[] = { + "INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */ + "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */ + "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */ ++ "RSCV0001", /* RISC-V PLIC */ ++ "RSCV0002", /* RISC-V APLIC */ ++ "PNP0C0F", /* PCI Link Device */ + NULL + }; + +@@ -2034,54 +2037,18 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val) + mutex_unlock(&acpi_scan_lock); + } + +-static void acpi_scan_init_hotplug(struct acpi_device *adev) +-{ +- struct acpi_hardware_id *hwid; +- +- if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { +- acpi_dock_add(adev); +- return; +- } +- list_for_each_entry(hwid, &adev->pnp.ids, list) { +- struct acpi_scan_handler *handler; +- +- handler = acpi_scan_match_handler(hwid->id, NULL); +- if (handler) { +- adev->flags.hotplug_notify = true; +- break; +- } +- } +-} +- +-static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) ++int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices) + { +- struct acpi_handle_list 
dep_devices; +- acpi_status status; + u32 count; + int i; + +- /* +- * Check for _HID here to avoid deferring the enumeration of: +- * 1. PCI devices. +- * 2. ACPI nodes describing USB ports. +- * Still, checking for _HID catches more then just these cases ... +- */ +- if (!check_dep || !acpi_has_method(handle, "_DEP") || +- !acpi_has_method(handle, "_HID")) +- return 0; +- +- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); +- if (ACPI_FAILURE(status)) { +- acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); +- return 0; +- } +- +- for (count = 0, i = 0; i < dep_devices.count; i++) { ++ for (count = 0, i = 0; i < dep_devices->count; i++) { + struct acpi_device_info *info; + struct acpi_dep_data *dep; + bool skip, honor_dep; ++ acpi_status status; + +- status = acpi_get_object_info(dep_devices.handles[i], &info); ++ status = acpi_get_object_info(dep_devices->handles[i], &info); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "Error reading _DEP device info\n"); + continue; +@@ -2100,19 +2067,79 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) + + count++; + +- dep->supplier = dep_devices.handles[i]; ++ dep->supplier = dep_devices->handles[i]; + dep->consumer = handle; + dep->honor_dep = honor_dep; + + mutex_lock(&acpi_dep_list_lock); +- list_add_tail(&dep->node , &acpi_dep_list); ++ list_add_tail(&dep->node, &acpi_dep_list); + mutex_unlock(&acpi_dep_list_lock); + } + ++ acpi_handle_list_free(dep_devices); + return count; + } + +-static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, ++static void acpi_scan_init_hotplug(struct acpi_device *adev) ++{ ++ struct acpi_hardware_id *hwid; ++ ++ if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { ++ acpi_dock_add(adev); ++ return; ++ } ++ list_for_each_entry(hwid, &adev->pnp.ids, list) { ++ struct acpi_scan_handler *handler; ++ ++ handler = acpi_scan_match_handler(hwid->id, NULL); ++ if (handler) { ++ adev->flags.hotplug_notify = true; ++ break; ++ } ++ } ++} ++ ++u32 __weak arch_acpi_add_auto_dep(acpi_handle handle) { return 0; } ++ ++static u32 acpi_scan_check_dep(acpi_handle handle) ++{ ++ struct acpi_handle_list dep_devices; ++ u32 count = 0; ++ ++ /* ++ * Some architectures like RISC-V need to add dependencies for ++ * all devices which use GSI to the interrupt controller so that ++ * interrupt controller is probed before any of those devices. ++ * Instead of mandating _DEP on all the devices, detect the ++ * dependency and add automatically. ++ */ ++ count += arch_acpi_add_auto_dep(handle); ++ ++ /* ++ * Check for _HID here to avoid deferring the enumeration of: ++ * 1. PCI devices. ++ * 2. ACPI nodes describing USB ports. ++ * Still, checking for _HID catches more then just these cases ... 
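acpi_scan_add_dep() above takes ownership of the handle list it is given and frees it with acpi_handle_list_free() once the dependency entries are queued. A minimal sketch of how an architecture hook (such as the RISC-V arch_acpi_add_auto_dep() added elsewhere in this patch) records a single supplier; the function name is hypothetical:

#include <linux/acpi.h>
#include <linux/slab.h>

static u32 example_add_single_dep(acpi_handle consumer, acpi_handle supplier)
{
	struct acpi_handle_list deps;

	deps.count = 1;
	deps.handles = kcalloc(1, sizeof(*deps.handles), GFP_KERNEL);
	if (!deps.handles)
		return 0;

	deps.handles[0] = supplier;
	/* acpi_scan_add_dep() frees deps.handles, so nothing to clean up here. */
	return acpi_scan_add_dep(consumer, &deps);
}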
++ */ ++ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID")) ++ return count; ++ ++ if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { ++ acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); ++ return count; ++ } ++ ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ return count; ++} ++ ++static acpi_status acpi_scan_check_crs_csi2_cb(acpi_handle handle, u32 a, void *b, void **c) ++{ ++ acpi_mipi_check_crs_csi2(handle); ++ return AE_OK; ++} ++ ++static acpi_status acpi_bus_check_add(acpi_handle handle, bool first_pass, + struct acpi_device **adev_p) + { + struct acpi_device *device = acpi_fetch_acpi_dev(handle); +@@ -2130,9 +2157,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, + if (acpi_device_should_be_hidden(handle)) + return AE_OK; + +- /* Bail out if there are dependencies. */ +- if (acpi_scan_check_dep(handle, check_dep) > 0) +- return AE_CTRL_DEPTH; ++ if (first_pass) { ++ acpi_mipi_check_crs_csi2(handle); ++ ++ /* Bail out if there are dependencies. */ ++ if (acpi_scan_check_dep(handle) > 0) { ++ /* ++ * The entire CSI-2 connection graph needs to be ++ * extracted before any drivers or scan handlers ++ * are bound to struct device objects, so scan ++ * _CRS CSI-2 resource descriptors for all ++ * devices below the current handle. ++ */ ++ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ++ ACPI_UINT32_MAX, ++ acpi_scan_check_crs_csi2_cb, ++ NULL, NULL, NULL); ++ return AE_CTRL_DEPTH; ++ } ++ } + + fallthrough; + case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ +@@ -2155,10 +2198,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, + } + + /* +- * If check_dep is true at this point, the device has no dependencies, ++ * If first_pass is true at this point, the device has no dependencies, + * or the creation of the device object would have been postponed above. + */ +- acpi_add_single_object(&device, handle, type, !check_dep); ++ acpi_add_single_object(&device, handle, type, !first_pass); + if (!device) + return AE_CTRL_DEPTH; + +@@ -2581,12 +2624,21 @@ int acpi_bus_scan(acpi_handle handle) + if (!device) + return -ENODEV; + ++ /* ++ * Allocate ACPI _CRS CSI-2 software nodes using information extracted ++ * from the _CRS CSI-2 resource descriptors during the ACPI namespace ++ * walk above. ++ */ ++ acpi_mipi_scan_crs_csi2(); ++ + acpi_bus_attach(device, (void *)true); + + /* Pass 2: Enumerate all of the remaining devices. 
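Devices deferred by the dependency checks above are enumerated in pass 2 by acpi_scan_postponed(); for suppliers listed in acpi_honor_dep_ids (now including RSCV0001, RSCV0002 and PNP0C0F), that only happens after the supplier driver signals readiness via acpi_dev_clear_dependencies(), which is what the new call in acpi_pci_link_add() earlier in this patch provides for PNP0C0F. A hedged sketch of the supplier side, with a hypothetical probe function:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int example_supplier_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	/* ... bring the supplier hardware up first ... */
	if (adev)
		acpi_dev_clear_dependencies(adev);

	return 0;
}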
*/ + + acpi_scan_postponed(); + ++ acpi_mipi_crs_csi2_cleanup(); ++ + return 0; + } + EXPORT_SYMBOL(acpi_bus_scan); +@@ -2735,6 +2787,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header, + return 0; + } + ++void __weak arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) { } ++ + int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + { + int count = 0; +@@ -2743,6 +2797,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + return 0; + + mutex_lock(&acpi_probe_mutex); ++ arch_sort_irqchip_probe(ap_head, nr); + for (ape = ap_head; nr; ape++, nr--) { + if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) { + acpi_probe_count = 0; +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c +index 8263508415a8..9b5a1c786230 100644 +--- a/drivers/acpi/thermal.c ++++ b/drivers/acpi/thermal.c +@@ -297,9 +297,8 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + } + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.trip.valid) { + memset(&devices, 0, sizeof(struct acpi_handle_list)); +- status = acpi_evaluate_reference(tz->device->handle, "_PSL", +- NULL, &devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(tz->device->handle, "_PSL", ++ NULL, &devices)) { + acpi_handle_info(tz->device->handle, + "Invalid passive threshold\n"); + tz->trips.passive.trip.valid = false; +@@ -307,10 +306,10 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + tz->trips.passive.trip.valid = true; + } + +- if (memcmp(&tz->trips.passive.devices, &devices, +- sizeof(struct acpi_handle_list))) { +- memcpy(&tz->trips.passive.devices, &devices, +- sizeof(struct acpi_handle_list)); ++ if (acpi_handle_list_equal(&tz->trips.passive.devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->trips.passive.devices, &devices); + ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); + } + } +@@ -362,9 +361,8 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + name[2] = 'L'; + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].trip.valid) { + memset(&devices, 0, sizeof(struct acpi_handle_list)); +- status = acpi_evaluate_reference(tz->device->handle, +- name, NULL, &devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(tz->device->handle, ++ name, NULL, &devices)) { + acpi_handle_info(tz->device->handle, + "Invalid active%d threshold\n", i); + tz->trips.active[i].trip.valid = false; +@@ -372,10 +370,10 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + tz->trips.active[i].trip.valid = true; + } + +- if (memcmp(&tz->trips.active[i].devices, &devices, +- sizeof(struct acpi_handle_list))) { +- memcpy(&tz->trips.active[i].devices, &devices, +- sizeof(struct acpi_handle_list)); ++ if (acpi_handle_list_equal(&tz->trips.active[i].devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->trips.active[i].devices, &devices); + ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); + } + } +@@ -389,12 +387,14 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + + if (flag & ACPI_TRIPS_DEVICES) { + memset(&devices, 0, sizeof(devices)); +- status = acpi_evaluate_reference(tz->device->handle, "_TZD", +- NULL, &devices); +- if (ACPI_SUCCESS(status) && +- memcmp(&tz->devices, &devices, sizeof(devices))) { +- tz->devices = devices; +- ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); ++ if 
(acpi_evaluate_reference(tz->device->handle, "_TZD", ++ NULL, &devices)) { ++ if (acpi_handle_list_equal(&tz->devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->devices, &devices); ++ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); ++ } + } + } + } +@@ -920,6 +920,18 @@ static void acpi_thermal_check_fn(struct work_struct *work) + mutex_unlock(&tz->thermal_check_lock); + } + ++static void acpi_thermal_free_thermal_zone(struct acpi_thermal *tz) ++{ ++ int i; ++ ++ acpi_handle_list_free(&tz->trips.passive.devices); ++ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) ++ acpi_handle_list_free(&tz->trips.active[i].devices); ++ acpi_handle_list_free(&tz->devices); ++ ++ kfree(tz); ++} ++ + static int acpi_thermal_add(struct acpi_device *device) + { + struct acpi_thermal *tz; +@@ -966,7 +978,7 @@ static int acpi_thermal_add(struct acpi_device *device) + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); + free_memory: +- kfree(tz); ++ acpi_thermal_free_thermal_zone(tz); + + return result; + } +@@ -986,7 +998,7 @@ static void acpi_thermal_remove(struct acpi_device *device) + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); + kfree(tz->trip_table); +- kfree(tz); ++ acpi_thermal_free_thermal_zone(tz); + } + + #ifdef CONFIG_PM_SLEEP +diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c +index 2ea14648a661..e84106a4ef59 100644 +--- a/drivers/acpi/utils.c ++++ b/drivers/acpi/utils.c +@@ -329,22 +329,18 @@ const char *acpi_get_subsystem_id(acpi_handle handle) + } + EXPORT_SYMBOL_GPL(acpi_get_subsystem_id); + +-acpi_status +-acpi_evaluate_reference(acpi_handle handle, +- acpi_string pathname, +- struct acpi_object_list *arguments, +- struct acpi_handle_list *list) ++bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, ++ struct acpi_object_list *arguments, ++ struct acpi_handle_list *list) + { +- acpi_status status = AE_OK; +- union acpi_object *package = NULL; +- union acpi_object *element = NULL; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; +- u32 i = 0; +- ++ union acpi_object *package; ++ acpi_status status; ++ bool ret = false; ++ u32 i; + +- if (!list) { +- return AE_BAD_PARAMETER; +- } ++ if (!list) ++ return false; + + /* Evaluate object. */ + +@@ -354,64 +350,106 @@ acpi_evaluate_reference(acpi_handle handle, + + package = buffer.pointer; + +- if ((buffer.length == 0) || !package) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } +- if (package->type != ACPI_TYPE_PACKAGE) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } +- if (!package->package.count) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } ++ if (buffer.length == 0 || !package || ++ package->type != ACPI_TYPE_PACKAGE || !package->package.count) ++ goto err; + +- if (package->package.count > ACPI_MAX_HANDLES) { +- kfree(package); +- return AE_NO_MEMORY; +- } + list->count = package->package.count; ++ list->handles = kcalloc(list->count, sizeof(*list->handles), GFP_KERNEL); ++ if (!list->handles) ++ goto err_clear; + + /* Extract package data. 
*/ + + for (i = 0; i < list->count; i++) { ++ union acpi_object *element = &(package->package.elements[i]); + +- element = &(package->package.elements[i]); ++ if (element->type != ACPI_TYPE_LOCAL_REFERENCE || ++ !element->reference.handle) ++ goto err_free; + +- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- break; +- } +- +- if (!element->reference.handle) { +- status = AE_NULL_ENTRY; +- acpi_util_eval_error(handle, pathname, status); +- break; +- } + /* Get the acpi_handle. */ + + list->handles[i] = element->reference.handle; + acpi_handle_debug(list->handles[i], "Found in reference list\n"); + } + +- end: +- if (ACPI_FAILURE(status)) { +- list->count = 0; +- //kfree(list->handles); +- } ++ ret = true; + ++end: + kfree(buffer.pointer); + +- return status; ++ return ret; ++ ++err_free: ++ kfree(list->handles); ++ list->handles = NULL; ++ ++err_clear: ++ list->count = 0; ++ ++err: ++ acpi_util_eval_error(handle, pathname, status); ++ goto end; + } + + EXPORT_SYMBOL(acpi_evaluate_reference); + ++/** ++ * acpi_handle_list_equal - Check if two ACPI handle lists are the same ++ * @list1: First list to compare. ++ * @list2: Second list to compare. ++ * ++ * Return true if the given ACPI handle lists are of the same size and ++ * contain the same ACPI handles in the same order. Otherwise, return false. ++ */ ++bool acpi_handle_list_equal(struct acpi_handle_list *list1, ++ struct acpi_handle_list *list2) ++{ ++ return list1->count == list2->count && ++ !memcmp(list1->handles, list2->handles, ++ list1->count * sizeof(*list1->handles)); ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_equal); ++ ++/** ++ * acpi_handle_list_replace - Replace one ACPI handle list with another ++ * @dst: ACPI handle list to replace. ++ * @src: Source ACPI handle list. ++ * ++ * Free the handles table in @dst, move the handles table from @src to @dst, ++ * copy count from @src to @dst and clear @src. ++ */ ++void acpi_handle_list_replace(struct acpi_handle_list *dst, ++ struct acpi_handle_list *src) ++{ ++ if (dst->count) ++ kfree(dst->handles); ++ ++ dst->count = src->count; ++ dst->handles = src->handles; ++ ++ src->handles = NULL; ++ src->count = 0; ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_replace); ++ ++/** ++ * acpi_handle_list_free - Free the handles table in an ACPI handle list ++ * @list: ACPI handle list to free. ++ * ++ * Free the handles table in @list and clear its count field. 
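Taken together, the handle-list helpers above give _PSL/_TZD-style consumers a simple update pattern. A minimal usage sketch (hypothetical function, mirroring the thermal.c changes earlier in this patch):

#include <linux/acpi.h>
#include <linux/string.h>

static void example_update_devices(acpi_handle handle,
				   struct acpi_handle_list *current_devices)
{
	struct acpi_handle_list devices;

	memset(&devices, 0, sizeof(devices));
	if (!acpi_evaluate_reference(handle, "_PSL", NULL, &devices))
		return;

	if (acpi_handle_list_equal(current_devices, &devices))
		acpi_handle_list_free(&devices);		/* unchanged: drop the new copy */
	else
		acpi_handle_list_replace(current_devices, &devices); /* takes ownership */
}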
++ */ ++void acpi_handle_list_free(struct acpi_handle_list *list) ++{ ++ if (!list->count) ++ return; ++ ++ kfree(list->handles); ++ list->count = 0; ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_free); ++ + acpi_status + acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) + { +diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c +index 96281de7010d..7f3ea78722fa 100644 +--- a/drivers/base/arch_numa.c ++++ b/drivers/base/arch_numa.c +@@ -530,7 +530,7 @@ static int __init arch_acpi_numa_init(void) + + ret = acpi_numa_init(); + if (ret) { +- pr_info("Failed to initialise from firmware\n"); ++ pr_debug("Failed to initialise from firmware\n"); + return ret; + } + diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 3348d4db5f1b..0d01890160f3 100644 --- a/drivers/base/platform-msi.c @@ -22675,10 +41005,18 @@ index 74fa2055868b..6a935100e1ae 100644 if (io.irq) io.irq_setup = ipmi_std_irq_setup; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig -index c30099866174..c8b005b647dd 100644 +index c30099866174..83976f7b9755 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig -@@ -501,6 +501,7 @@ source "drivers/clk/visconti/Kconfig" +@@ -490,6 +490,7 @@ source "drivers/clk/rockchip/Kconfig" + source "drivers/clk/samsung/Kconfig" + source "drivers/clk/sifive/Kconfig" + source "drivers/clk/socfpga/Kconfig" ++source "drivers/clk/spacemit/Kconfig" + source "drivers/clk/sprd/Kconfig" + source "drivers/clk/starfive/Kconfig" + source "drivers/clk/sunxi/Kconfig" +@@ -501,6 +502,7 @@ source "drivers/clk/visconti/Kconfig" source "drivers/clk/x86/Kconfig" source "drivers/clk/xilinx/Kconfig" source "drivers/clk/zynqmp/Kconfig" @@ -22687,10 +41025,18 @@ index c30099866174..c8b005b647dd 100644 # Kunit test cases config CLK_KUNIT_TEST diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile -index 18969cbd4bb1..adcbb9f95baa 100644 +index 18969cbd4bb1..1e4e2e292f5d 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile -@@ -124,6 +124,7 @@ obj-$(CONFIG_ARCH_STM32) += stm32/ +@@ -117,6 +117,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ + obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/ + obj-$(CONFIG_CLK_SIFIVE) += sifive/ + obj-y += socfpga/ ++obj-$(CONFIG_SOC_SPACEMIT) += spacemit/ + obj-$(CONFIG_PLAT_SPEAR) += spear/ + obj-y += sprd/ + obj-$(CONFIG_ARCH_STI) += st/ +@@ -124,6 +125,7 @@ obj-$(CONFIG_ARCH_STM32) += stm32/ obj-y += starfive/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-y += sunxi-ng/ @@ -22698,7 +41044,7 @@ index 18969cbd4bb1..adcbb9f95baa 100644 obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ -@@ -136,3 +137,4 @@ endif +@@ -136,3 +138,4 @@ endif obj-y += xilinx/ obj-$(CONFIG_ARCH_ZYNQ) += zynq/ obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/ @@ -22714,10 +41060,10 @@ index 000000000000..55997fc07b5b +obj-$(CONFIG_ARCH_SOPHGO) += clk-mango.o diff --git a/drivers/clk/sophgo/clk-dummy.c b/drivers/clk/sophgo/clk-dummy.c new file mode 100644 -index 000000000000..99af0e6dae6a +index 000000000000..ddbbcf55b964 --- /dev/null +++ b/drivers/clk/sophgo/clk-dummy.c -@@ -0,0 +1,600 @@ +@@ -0,0 +1,594 @@ +/* + * Copyright (c) 2022 SOPHGO + * @@ -22765,13 +41111,11 @@ index 000000000000..99af0e6dae6a + struct of_phandle_args clkspec; + int rc, index = 0; + u32 rate; -+ struct property *prop; -+ const __be32 *cur; + struct clk *clk; + + node = of_find_node_by_name(NULL, "default_rates"); + -+ of_property_for_each_u32 (node, "clock-rates", prop, cur, rate) { ++ of_property_for_each_u32 (node, "clock-rates", rate) { 
+ if (rate) { + rc = of_parse_phandle_with_args(node, "clocks", + "#clock-cells", index, &clkspec); @@ -22820,12 +41164,10 @@ index 000000000000..99af0e6dae6a + struct of_phandle_args clkspec; + int rc, index = 0; + u32 rate; -+ struct property *prop; -+ const __be32 *cur; + + node = of_find_node_by_name(NULL, "default_rates"); + -+ of_property_for_each_u32 (node, "clock-rates", prop, cur, rate) { ++ of_property_for_each_u32 (node, "clock-rates", rate) { + if (rate) { + rc = of_parse_phandle_with_args(node, "clocks", + "#clock-cells", index, &clkspec); @@ -22948,13 +41290,11 @@ index 000000000000..99af0e6dae6a +int dm_set_default_clk_rates(struct device_node *node) +{ + struct of_phandle_args clkspec; -+ struct property *prop; -+ const __be32 *cur; + int rc, index = 0; + struct clk *clk; + u32 rate; + -+ of_property_for_each_u32 (node, "clock-rates", prop, cur, rate) { ++ of_property_for_each_u32 (node, "clock-rates", rate) { + if (rate) { + rc = of_parse_phandle_with_args(node, "clocks", + "#clock-cells", index, &clkspec); @@ -24303,10 +42643,10 @@ index 000000000000..7f386092f764 +CLK_OF_DECLARE(dm_mango_clk_default_rate, "mango, dm-clk-default-rates", mango_clk_init); diff --git a/drivers/clk/sophgo/clk.c b/drivers/clk/sophgo/clk.c new file mode 100644 -index 000000000000..4d3893ace2b9 +index 000000000000..c77f2f631a8c --- /dev/null +++ b/drivers/clk/sophgo/clk.c -@@ -0,0 +1,883 @@ +@@ -0,0 +1,881 @@ +/* + * Copyright (c) 2022 SOPHGO + * @@ -24820,13 +43160,11 @@ index 000000000000..4d3893ace2b9 +int set_default_clk_rates(struct device_node *node) +{ + struct of_phandle_args clkspec; -+ struct property *prop; -+ const __be32 *cur; + int rc, index = 0; + struct clk *clk; + u32 rate; + -+ of_property_for_each_u32 (node, "clock-rates", prop, cur, rate) { ++ of_property_for_each_u32 (node, "clock-rates", rate) { + if (rate) { + rc = of_parse_phandle_with_args(node, "clocks", + "#clock-cells", index, &clkspec); @@ -25348,124 +43686,4442 @@ index 000000000000..81e9f9eb1b20 +(struct device_node *node, struct mango_clk_data *clk_data, const char *name); +int dm_set_default_clk_rates(struct device_node *node); +#endif -diff --git a/drivers/clk/xuantie/Kconfig b/drivers/clk/xuantie/Kconfig +diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig new file mode 100644 -index 000000000000..9a2ee8c01bf3 +index 000000000000..fe905e7cf2d3 --- /dev/null -+++ b/drivers/clk/xuantie/Kconfig -@@ -0,0 +1,12 @@ ++++ b/drivers/clk/spacemit/Kconfig +@@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 ++# common clock support for SPACEMIT SoC family. + -+config XUANTIE_CLK -+ bool -+ def_bool ARCH_XUANTIE ++config SPACEMIT_K1X_CCU ++ tristate "Clock support for Spacemit k1x SoCs" ++ depends on SOC_SPACEMIT_K1X ++ help ++ Build the driver for Spacemit K1x Clock Driver. 
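The default-rate hunks above move to the three-argument of_property_for_each_u32(), which no longer exposes the struct property / __be32 iterator locals, and they expect a node (such as the "default_rates" node used in this code) pairing "clocks" phandles with a same-length "clock-rates" u32 list, where zero entries are skipped. A minimal sketch of the newer iteration form (the dump function is illustrative only):

#include <linux/of.h>
#include <linux/printk.h>

static void example_dump_rates(struct device_node *np)
{
	int index = 0;
	u32 rate;

	of_property_for_each_u32(np, "clock-rates", rate) {
		if (rate)	/* a zero rate means "leave this clock alone" */
			pr_debug("clocks[%d] -> %u Hz\n", index, rate);
		index++;
	}
}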
+ -+config CLK_TH1520_FM -+ bool "XuanTie Th1520 Fullmask Clock Driver" -+ depends on ARCH_XUANTIE -+ default n -+ help -+ Build the driver for th1520 fullmask Clock Driver -diff --git a/drivers/clk/xuantie/Makefile b/drivers/clk/xuantie/Makefile +diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile new file mode 100644 -index 000000000000..58e0ab431ae5 +index 000000000000..6bfb749658d7 --- /dev/null -+++ b/drivers/clk/xuantie/Makefile -@@ -0,0 +1,7 @@ ++++ b/drivers/clk/spacemit/Makefile +@@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 ++# ++# Spacemit Clock specific Makefile ++# + -+obj-$(CONFIG_XUANTIE_CLK) += \ -+ clk.o -+ -+obj-$(CONFIG_CLK_TH1520_FM) += clk-th1520-fm.o -+obj-$(CONFIG_CLK_TH1520_FM) += gate/ -diff --git a/drivers/clk/xuantie/clk-th1520-fm.c b/drivers/clk/xuantie/clk-th1520-fm.c ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu-spacemit-k1x.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_mix.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_pll.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_dpll.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_ddn.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_ddr.o +diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.c b/drivers/clk/spacemit/ccu-spacemit-k1x.c new file mode 100644 -index 000000000000..33b5aa6127fa +index 000000000000..e4b176b39247 --- /dev/null -+++ b/drivers/clk/xuantie/clk-th1520-fm.c -@@ -0,0 +1,646 @@ -+// SPDX-License-Identifier: GPL-2.0 ++++ b/drivers/clk/spacemit/ccu-spacemit-k1x.c +@@ -0,0 +1,2123 @@ ++// SPDX-License-Identifier: GPL-2.0-only +/* -+ * Copyright (C) 2021 Alibaba Group Holding Limited. ++ * Spacemit k1x clock controller driver ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * + */ + -+#include ++#include +#include -+#include -+#include -+#include +#include +#include -+#include -+#include +#include +#include -+#include ++#include ++#include "ccu-spacemit-k1x.h" ++#include "ccu_mix.h" ++#include "ccu_pll.h" ++#include "ccu_ddn.h" ++#include "ccu_dpll.h" ++#include "ccu_ddr.h" ++ ++DEFINE_SPINLOCK(g_cru_lock); ++ ++/* APBS register offset */ ++/* pll1 */ ++#define APB_SPARE1_REG 0x100 ++#define APB_SPARE2_REG 0x104 ++#define APB_SPARE3_REG 0x108 ++/* pll2 */ ++#define APB_SPARE7_REG 0x118 ++#define APB_SPARE8_REG 0x11c ++#define APB_SPARE9_REG 0x120 ++/* pll3 */ ++#define APB_SPARE10_REG 0x124 ++#define APB_SPARE11_REG 0x128 ++#define APB_SPARE12_REG 0x12c ++/* end of APBS register offset */ ++ ++/* APBC register offset */ ++#define APBC_UART1_CLK_RST 0x0 ++#define APBC_UART2_CLK_RST 0x4 ++#define APBC_GPIO_CLK_RST 0x8 ++#define APBC_PWM0_CLK_RST 0xc ++#define APBC_PWM1_CLK_RST 0x10 ++#define APBC_PWM2_CLK_RST 0x14 ++#define APBC_PWM3_CLK_RST 0x18 ++#define APBC_TWSI8_CLK_RST 0x20 ++#define APBC_UART3_CLK_RST 0x24 ++#define APBC_RTC_CLK_RST 0x28 ++#define APBC_TWSI0_CLK_RST 0x2c ++#define APBC_TWSI1_CLK_RST 0x30 ++#define APBC_TIMERS1_CLK_RST 0x34 ++#define APBC_TWSI2_CLK_RST 0x38 ++#define APBC_AIB_CLK_RST 0x3c ++#define APBC_TWSI4_CLK_RST 0x40 ++#define APBC_TIMERS2_CLK_RST 0x44 ++#define APBC_ONEWIRE_CLK_RST 0x48 ++#define APBC_TWSI5_CLK_RST 0x4c ++#define APBC_DRO_CLK_RST 0x58 ++#define APBC_IR_CLK_RST 0x5c ++#define APBC_TWSI6_CLK_RST 0x60 ++#define APBC_COUNTER_CLK_SEL 0x64 ++ ++#define APBC_TWSI7_CLK_RST 0x68 ++#define APBC_TSEN_CLK_RST 0x6c ++ ++#define APBC_UART4_CLK_RST 0x70 ++#define APBC_UART5_CLK_RST 0x74 ++#define APBC_UART6_CLK_RST 0x78 ++#define APBC_SSP3_CLK_RST 0x7c ++ ++#define APBC_SSPA0_CLK_RST 0x80 ++#define APBC_SSPA1_CLK_RST 0x84 ++ ++#define APBC_IPC_AP2AUD_CLK_RST 0x90 ++#define 
APBC_UART7_CLK_RST 0x94 ++#define APBC_UART8_CLK_RST 0x98 ++#define APBC_UART9_CLK_RST 0x9c ++ ++#define APBC_CAN0_CLK_RST 0xa0 ++#define APBC_PWM4_CLK_RST 0xa8 ++#define APBC_PWM5_CLK_RST 0xac ++#define APBC_PWM6_CLK_RST 0xb0 ++#define APBC_PWM7_CLK_RST 0xb4 ++#define APBC_PWM8_CLK_RST 0xb8 ++#define APBC_PWM9_CLK_RST 0xbc ++#define APBC_PWM10_CLK_RST 0xc0 ++#define APBC_PWM11_CLK_RST 0xc4 ++#define APBC_PWM12_CLK_RST 0xc8 ++#define APBC_PWM13_CLK_RST 0xcc ++#define APBC_PWM14_CLK_RST 0xd0 ++#define APBC_PWM15_CLK_RST 0xd4 ++#define APBC_PWM16_CLK_RST 0xd8 ++#define APBC_PWM17_CLK_RST 0xdc ++#define APBC_PWM18_CLK_RST 0xe0 ++#define APBC_PWM19_CLK_RST 0xe4 ++/* end of APBC register offset */ ++ ++/* MPMU register offset */ ++#define MPMU_POSR 0x10 ++#define POSR_PLL1_LOCK BIT(27) ++#define POSR_PLL2_LOCK BIT(28) ++#define POSR_PLL3_LOCK BIT(29) ++ ++#define MPMU_VRCR 0x18 ++#define MPMU_VRCR_REQ_EN0 BIT(0) ++#define MPMU_VRCR_REQ_EN2 BIT(2) ++#define MPMU_VRCR_REQ_POL2 BIT(6) ++#define MPMU_VRCR_VCXO_OUT_REQ_EN2 BIT(14) ++ ++#define MPMU_WDTPCR 0x200 ++#define MPMU_RIPCCR 0x210 ++#define MPMU_ACGR 0x1024 ++#define MPMU_SUCCR 0x14 ++#define MPMU_ISCCR 0x44 ++#define MPMU_SUCCR_1 0x10b0 ++#define MPMU_APBCSCR 0x1050 ++ ++/* end of MPMU register offset */ ++ ++/* APMU register offset */ ++#define APMU_JPG_CLK_RES_CTRL 0x20 ++#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x24 ++#define APMU_ISP_CLK_RES_CTRL 0x38 ++#define APMU_LCD_CLK_RES_CTRL1 0x44 ++#define APMU_LCD_SPI_CLK_RES_CTRL 0x48 ++#define APMU_LCD_CLK_RES_CTRL2 0x4c ++#define APMU_CCIC_CLK_RES_CTRL 0x50 ++#define APMU_SDH0_CLK_RES_CTRL 0x54 ++#define APMU_SDH1_CLK_RES_CTRL 0x58 ++#define APMU_USB_CLK_RES_CTRL 0x5c ++#define APMU_QSPI_CLK_RES_CTRL 0x60 ++#define APMU_USB_CLK_RES_CTRL 0x5c ++#define APMU_DMA_CLK_RES_CTRL 0x64 ++#define APMU_AES_CLK_RES_CTRL 0x68 ++#define APMU_VPU_CLK_RES_CTRL 0xa4 ++#define APMU_GPU_CLK_RES_CTRL 0xcc ++#define APMU_SDH2_CLK_RES_CTRL 0xe0 ++#define APMU_PMUA_MC_CTRL 0xe8 ++#define APMU_PMU_CC2_AP 0x100 ++#define APMU_PMUA_EM_CLK_RES_CTRL 0x104 ++ ++#define APMU_AUDIO_CLK_RES_CTRL 0x14c ++#define APMU_HDMI_CLK_RES_CTRL 0x1B8 ++#define APMU_CCI550_CLK_CTRL 0x300 ++#define APMU_ACLK_CLK_CTRL 0x388 ++#define APMU_CPU_C0_CLK_CTRL 0x38C ++#define APMU_CPU_C1_CLK_CTRL 0x390 ++ ++#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc ++#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4 ++#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc ++ ++#define APMU_EMAC0_CLK_RES_CTRL 0x3e4 ++#define APMU_EMAC1_CLK_RES_CTRL 0x3ec ++ ++#define APMU_DFC_AP 0x180 ++#define APMU_DFC_STATUS 0x188 ++ ++#define APMU_DFC_LEVEL0 0x190 ++#define APMU_DFC_LEVEL1 0x194 ++#define APMU_DFC_LEVEL2 0x198 ++#define APMU_DFC_LEVEL3 0x19c ++#define APMU_DFC_LEVEL4 0x1a0 ++#define APMU_DFC_LEVEL5 0x1a4 ++#define APMU_DFC_LEVEL6 0x1a8 ++#define APMU_DFC_LEVEL7 0x1ac ++ ++#define APMU_DPLL1_CLK_CTRL1 0x39c ++#define APMU_DPLL1_CLK_CTRL2 0x3a0 ++#define APMU_DPLL2_CLK_CTRL1 0x3a8 ++#define APMU_DPLL2_CLK_CTRL2 0x3ac ++/* end of APMU register offset */ ++ ++/* APBC2 register offset */ ++#define APBC2_UART1_CLK_RST 0x00 ++#define APBC2_SSP2_CLK_RST 0x04 ++#define APBC2_TWSI3_CLK_RST 0x08 ++#define APBC2_RTC_CLK_RST 0x0c ++#define APBC2_TIMERS0_CLK_RST 0x10 ++#define APBC2_KPC_CLK_RST 0x14 ++#define APBC2_GPIO_CLK_RST 0x1c ++/* end of APBC2 register offset */ ++ ++/* RCPU register offset */ ++#define RCPU_HDMI_CLK_RST 0x2044 ++#define RCPU_CAN_CLK_RST 0x4c ++#define RCPU_I2C0_CLK_RST 0x30 ++ ++#define RCPU_SSP0_CLK_RST 0x28 ++#define RCPU_IR_CLK_RST 0x48 ++#define RCPU_UART0_CLK_RST 0xd8 
++#define RCPU_UART1_CLK_RST 0x3c ++/* end of RCPU register offset */ ++ ++/* RCPU2 register offset */ ++#define RCPU2_PWM0_CLK_RST 0x00 ++#define RCPU2_PWM1_CLK_RST 0x04 ++#define RCPU2_PWM2_CLK_RST 0x08 ++#define RCPU2_PWM3_CLK_RST 0x0c ++#define RCPU2_PWM4_CLK_RST 0x10 ++#define RCPU2_PWM5_CLK_RST 0x14 ++#define RCPU2_PWM6_CLK_RST 0x18 ++#define RCPU2_PWM7_CLK_RST 0x1c ++#define RCPU2_PWM8_CLK_RST 0x20 ++#define RCPU2_PWM9_CLK_RST 0x24 ++/* end of RCPU2 register offset */ ++ ++struct spacemit_k1x_clk k1x_clock_controller; ++ ++static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = { ++ PLL_RATE(3000000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3f, 0xe00000), ++ PLL_RATE(3200000000UL, 0x67, 0xdd, 0x50, 0x00, 0x43, 0xeaaaab), ++ PLL_RATE(2457600000UL, 0x64, 0xdd, 0x50, 0x00, 0x33, 0x0ccccd), ++ PLL_RATE(2800000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3a, 0x155555), ++}; ++ ++static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = { ++ PLL_RATE(1600000000UL, 0x61, 0xcd, 0x50, 0x00, 0x43, 0xeaaaab), ++ PLL_RATE(1800000000UL, 0x61, 0xcd, 0x50, 0x00, 0x4b, 0x000000), ++ PLL_RATE(2000000000UL, 0x62, 0xdd, 0x50, 0x00, 0x2a, 0xeaaaab), ++ PLL_RATE(3000000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3f, 0xe00000), ++ PLL_RATE(3200000000UL, 0x67, 0xdd, 0x50, 0x00, 0x43, 0xeaaaab), ++ PLL_RATE(2457600000UL, 0x64, 0xdd, 0x50, 0x00, 0x33, 0x0ccccd), ++}; ++ ++static SPACEMIT_CCU_PLL(pll2, "pll2", &pll2_rate_tbl, ++ ARRAY_SIZE(pll2_rate_tbl), ++ BASE_TYPE_APBS, APB_SPARE7_REG, APB_SPARE8_REG, APB_SPARE9_REG, ++ MPMU_POSR, POSR_PLL2_LOCK, 1, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_PLL(pll3, "pll3", &pll3_rate_tbl, ++ ARRAY_SIZE(pll3_rate_tbl), ++ BASE_TYPE_APBS, APB_SPARE10_REG, APB_SPARE11_REG, APB_SPARE12_REG, ++ MPMU_POSR, POSR_PLL3_LOCK, 1, ++ CLK_IGNORE_UNUSED); ++ ++/* pll1 */ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d2, "pll1_d2", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(1), BIT(1), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d3, "pll1_d3", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(2), BIT(2), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d4, "pll1_d4", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(3), BIT(3), 0x0, ++ 4, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d5, "pll1_d5", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(4), BIT(4), 0x0, ++ 5, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d6, "pll1_d6", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(5), BIT(5), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d7, "pll1_d7", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(6), BIT(6), 0x0, ++ 7, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d8, "pll1_d8", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(7), BIT(7), 0x0, ++ 8, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d11_223p4, "pll1_d11_223p4", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(15), BIT(15), 0x0, ++ 11, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d13_189, "pll1_d13_189", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(16), BIT(16), 0x0, ++ 13, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d23_106p8, "pll1_d23_106p8", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(20), BIT(20), 0x0, ++ 23, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d64_38p4, "pll1_d64_38p4", ++ 
"pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(0), BIT(0), 0x0, ++ 64, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_aud_245p7, "pll1_aud_245p7", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(10), BIT(10), 0x0, ++ 10, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_aud_24p5, "pll1_aud_24p5", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(11), BIT(11), 0x0, ++ 100, 1, CLK_IGNORE_UNUSED); ++ ++/* pll2 */ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d1, "pll2_d1", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(0), BIT(0), 0x0, ++ 1, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d2, "pll2_d2", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(1), BIT(1), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d3, "pll2_d3", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(2), BIT(2), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d4, "pll2_d4", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(3), BIT(3), 0x0, ++ 4, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d5, "pll2_d5", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(4), BIT(4), 0x0, ++ 5, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d6, "pll2_d6", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(5), BIT(5), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d7, "pll2_d7", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(6), BIT(6), 0x0, ++ 7, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d8, "pll2_d8", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(7), BIT(7), 0x0, ++ 8, 1, CLK_IGNORE_UNUSED); ++ ++/* pll3 */ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d1, "pll3_d1", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(0), BIT(0), 0x0, ++ 1, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d2, "pll3_d2", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(1), BIT(1), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d3, "pll3_d3", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(2), BIT(2), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d4, "pll3_d4", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(3), BIT(3), 0x0, ++ 4, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d5, "pll3_d5", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(4), BIT(4), 0x0, ++ 5, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d6, "pll3_d6", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(5), BIT(5), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d7, "pll3_d7", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(6), BIT(6), 0x0, ++ 7, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d8, "pll3_d8", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(7), BIT(7), 0x0, ++ 8, 1, CLK_IGNORE_UNUSED); ++ ++/* pll3_div */ ++static SPACEMIT_CCU_FACTOR(pll3_80, "pll3_80", "pll3_d8", ++ 5, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll3_40, "pll3_40", "pll3_d8", ++ 10, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll3_20, "pll3_20", "pll3_d8", ++ 20, 1); ++ ++/* pll1_d8 */ ++static SPACEMIT_CCU_GATE(pll1_d8_307p2, "pll1_d8_307p2", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(13), BIT(13), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d32_76p8, ++ "pll1_d32_76p8", "pll1_d8_307p2", ++ 4, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d40_61p44, ++ "pll1_d40_61p44", "pll1_d8_307p2", ++ 5, 1); 
++ ++static SPACEMIT_CCU_FACTOR(pll1_d16_153p6, ++ "pll1_d16_153p6", "pll1_d8", ++ 2, 1); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d24_102p4, ++ "pll1_d24_102p4", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(12), BIT(12), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d48_51p2, ++ "pll1_d48_51p2", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(7), BIT(7), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d48_51p2_ap, ++ "pll1_d48_51p2_ap", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(11), BIT(11), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_m3d128_57p6, ++ "pll1_m3d128_57p6", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(8), BIT(8), 0x0, ++ 16, 3, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d96_25p6, ++ "pll1_d96_25p6", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(4), BIT(4), 0x0, ++ 12, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d192_12p8, ++ "pll1_d192_12p8", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(3), BIT(3), 0x0, ++ 24, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d192_12p8_wdt, ++ "pll1_d192_12p8_wdt", ++ "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(19), BIT(19), 0x0, ++ 24, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d384_6p4, ++ "pll1_d384_6p4", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(2), BIT(2), 0x0, ++ 48, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d768_3p2, ++ "pll1_d768_3p2", "pll1_d384_6p4", ++ 2, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d1536_1p6, ++ "pll1_d1536_1p6", "pll1_d384_6p4", ++ 4, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d3072_0p8, ++ "pll1_d3072_0p8", "pll1_d384_6p4", ++ 8, 1); ++ ++/* pll1_d7 */ ++static SPACEMIT_CCU_FACTOR(pll1_d7_351p08, ++ "pll1_d7_351p08", "pll1_d7", ++ 1, 1); ++ ++/* pll1_d6 */ ++static SPACEMIT_CCU_GATE(pll1_d6_409p6, ++ "pll1_d6_409p6", "pll1_d6", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(0), BIT(0), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d12_204p8, ++ "pll1_d12_204p8", "pll1_d6", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(5), BIT(5), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++/* pll1_d5 */ ++static SPACEMIT_CCU_GATE(pll1_d5_491p52, ++ "pll1_d5_491p52", "pll1_d5", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(21), BIT(21), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d10_245p76, ++ "pll1_d10_245p76", "pll1_d5", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(18), BIT(18), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++/* pll1_d4 */ ++static SPACEMIT_CCU_GATE(pll1_d4_614p4, ++ "pll1_d4_614p4", "pll1_d4", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(15), BIT(15), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d52_47p26, ++ "pll1_d52_47p26", "pll1_d4", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(10), BIT(10), 0x0, ++ 13, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d78_31p5, ++ "pll1_d78_31p5", "pll1_d4", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(6), BIT(6), 0x0, ++ 39, 2, CLK_IGNORE_UNUSED); ++ ++/* pll1_d3 */ ++static SPACEMIT_CCU_GATE(pll1_d3_819p2, ++ "pll1_d3_819p2", "pll1_d3", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(14), BIT(14), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++/* pll1_d2 */ ++static SPACEMIT_CCU_GATE(pll1_d2_1228p8, ++ "pll1_d2_1228p8", "pll1_d2", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(16), BIT(16), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++/* dpll */ ++static const struct ccu_dpll_rate_tbl dpll1_rate_tbl[] = { ++ DPLL_RATE(2400000000UL, 0x00, 0x00, 0x20, ++ 0x2a, 0x32, 0x64, 0xdd, 0x50), ++ 
DPLL_RATE(2400000000UL, 0x00, 0x3b, 0x20, ++ 0x2a, 0x32, 0x64, 0xdd, 0x50), ++}; ++ ++static const struct ccu_dpll_rate_tbl dpll2_rate_tbl[] = { ++ DPLL_RATE(3200000000UL, 0x55, 0x55, 0x3d, ++ 0x2a, 0x43, 0x67, 0xdd, 0x50), ++}; ++ ++static SPACEMIT_CCU_DPLL(dpll1, "dpll1", &dpll1_rate_tbl, ++ ARRAY_SIZE(dpll1_rate_tbl), ++ BASE_TYPE_APMU, APMU_DPLL1_CLK_CTRL1, APMU_DPLL1_CLK_CTRL2, ++ 0, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_DPLL(dpll2, "dpll2", &dpll2_rate_tbl, ++ ARRAY_SIZE(dpll2_rate_tbl), ++ BASE_TYPE_APMU, APMU_DPLL2_CLK_CTRL1, APMU_DPLL2_CLK_CTRL2, ++ 0, CLK_IGNORE_UNUSED); ++ ++static const char * const dfc_lvl_parents[] = { ++ "dpll2", "dpll1" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl0, "dfc_lvl0", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL0, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl1, "dfc_lvl1", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL1, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl2, "dfc_lvl2", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL2, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl3, "dfc_lvl3", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL3, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl4, "dfc_lvl4", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL4, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl5, "dfc_lvl5", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL5, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl6, "dfc_lvl6", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL6, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl7, "dfc_lvl7", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL7, ++ 14, 2, 8, 1, 0); ++ ++static const char * const ddr_clk_parents[] = { ++ "dfc_lvl0", "dfc_lvl1", "dfc_lvl2", "dfc_lvl3", ++ "dfc_lvl4", "dfc_lvl5", "dfc_lvl6", "dfc_lvl7" ++}; ++ ++static SPACEMIT_CCU_DDR_FC(ddr, "ddr", ddr_clk_parents, ++ BASE_TYPE_APMU, APMU_DFC_AP, BIT(0), ++ 1, 3, 0); ++ ++static struct ccu_ddn_info uart_ddn_mask_info = { ++ .factor = 2, ++ .num_mask = 0x1fff, ++ .den_mask = 0x1fff, ++ .num_shift = 16, ++ .den_shift = 0, ++}; ++ ++static struct ccu_ddn_tbl slow_uart1_tbl[] = { ++ {.num = 125, .den = 24}, ++}; ++ ++static struct ccu_ddn_tbl slow_uart2_tbl[] = { ++ {.num = 6144, .den = 960}, ++}; ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(slow_uart, ++ "slow_uart", NULL, ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DDN(slow_uart1_14p74, ++ "slow_uart1_14p74", "pll1_d16_153p6", ++ &uart_ddn_mask_info, &slow_uart1_tbl, ++ ARRAY_SIZE(slow_uart1_tbl), ++ BASE_TYPE_MPMU, MPMU_SUCCR, ++ CLK_IGNORE_UNUSED); + -+#include "clk.h" ++static SPACEMIT_CCU_DDN(slow_uart2_48, ++ "slow_uart2_48", "pll1_d4_614p4", ++ &uart_ddn_mask_info, &slow_uart2_tbl, ++ ARRAY_SIZE(slow_uart2_tbl), ++ BASE_TYPE_MPMU, MPMU_SUCCR_1, ++ CLK_IGNORE_UNUSED); + -+static struct clk *clks[CLK_END]; -+static struct clk_onecell_data clk_data; ++static const char * const uart_parent_names[] = { ++ "pll1_m3d128_57p6", "slow_uart1_14p74", "slow_uart2_48" ++}; + -+/* Th1520 Fullmask */ -+static u32 share_cnt_x2h_cpusys_clk_en; -+static u32 share_cnt_dmac_cpusys_clk_en; -+static u32 share_cnt_timer0_clk_en; -+static u32 share_cnt_timer1_clk_en; -+static u32 share_cnt_axi4_cpusys2_clk_en; -+static u32 share_cnt_bmu_c910_clk_en; -+static u32 share_cnt_aon2cpu_a2x_clk_en; -+static u32 share_cnt_chip_dbg_clk_en; -+static u32 share_cnt_x2x_cpusys_clk_en; -+static u32 share_cnt_cfg2tee_x2h_clk_en; -+static u32 
share_cnt_cpu2aon_x2h_clk_en; -+static u32 share_cnt_cpu2vp_x2p_clk_en; -+static u32 share_cnt_npu_core_clk_en; -+static u32 share_cnt_cpu2peri_x2h_clk_en; -+static u32 share_cnt_cpu2vi_x2h_clk_en; -+static u32 share_cnt_vpsys_axi_aclk_en; -+static u32 share_cnt_gmac1_clk_en; -+static u32 share_cnt_gmac0_clk_en; -+static u32 share_cnt_perisys_apb3_hclk_en; -+static u32 share_cnt_qspi0_clk_en; -+static u32 share_cnt_gmac_axi_clk_en; -+static u32 share_cnt_gpio0_clk_en; -+static u32 share_cnt_gpio1_clk_en; -+static u32 share_cnt_pwm_clk_en; -+static u32 share_cnt_spi_clk_en; -+static u32 share_cnt_uart0_clk_en; -+static u32 share_cnt_uart2_clk_en; -+static u32 share_cnt_i2c2_clk_en; -+static u32 share_cnt_i2c3_clk_en; -+static u32 share_cnt_peri_i2s_clk_en; -+static u32 share_cnt_qspi1_clk_en; -+static u32 share_cnt_uart1_clk_en; -+static u32 share_cnt_uart3_clk_en; -+static u32 share_cnt_uart4_clk_en; -+static u32 share_cnt_uart5_clk_en; -+static u32 share_cnt_i2c0_clk_en; -+static u32 share_cnt_i2c1_clk_en; -+static u32 share_cnt_i2c4_clk_en; -+static u32 share_cnt_i2c5_clk_en; -+static u32 share_cnt_gpio2_clk_en; -+static u32 share_cnt_gpio3_clk_en; -+static u32 share_cnt_vosys_axi_aclk_en; ++static SPACEMIT_CCU_MUX_GATE(uart1_clk, "uart1_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart2_clk, "uart2_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart3_clk, "uart3_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART3_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart4_clk, "uart4_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART4_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart5_clk, "uart5_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART5_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart6_clk, "uart6_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART6_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart7_clk, "uart7_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART7_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart8_clk, "uart8_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART8_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart9_clk, "uart9_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART9_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(gpio_clk, "gpio_clk", "vctcxo_24", ++ BASE_TYPE_APBC, APBC_GPIO_CLK_RST, ++ 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const pwm_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(pwm0_clk, "pwm0_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM0_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm1_clk, "pwm1_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM1_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm2_clk, "pwm2_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM2_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm3_clk, "pwm3_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM3_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm4_clk, "pwm4_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM4_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm5_clk, "pwm5_clk", ++ pwm_parent_names, 
BASE_TYPE_APBC, APBC_PWM5_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm6_clk, "pwm6_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM6_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm7_clk, "pwm7_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM7_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm8_clk, "pwm8_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM8_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm9_clk, "pwm9_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM9_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm10_clk, "pwm10_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM10_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm11_clk, "pwm11_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM11_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm12_clk, "pwm12_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM12_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm13_clk, "pwm13_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM13_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm14_clk, "pwm14_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM14_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm15_clk, "pwm15_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM15_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm16_clk, "pwm16_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM16_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm17_clk, "pwm17_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM17_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm18_clk, "pwm18_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM18_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm19_clk, "pwm19_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM19_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static const char * const ssp_parent_names[] = { ++ "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", ++ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", ++ "pll1_d3072_0p8" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ssp3_clk, ++ "ssp3_clk", ssp_parent_names, ++ BASE_TYPE_APBC, APBC_SSP3_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(rtc_clk, "rtc_clk", "clk_32k", ++ BASE_TYPE_APBC, APBC_RTC_CLK_RST, ++ 0x83, 0x83, 0x0, 0); ++ ++static const char * const twsi_parent_names[] = { ++ "pll1_d78_31p5", "pll1_d48_51p2", "pll1_d40_61p44" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(twsi0_clk, "twsi0_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI0_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi1_clk, "twsi1_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi2_clk, "twsi2_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi4_clk, "twsi4_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI4_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi5_clk, "twsi5_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI5_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi6_clk, "twsi6_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI6_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ 
++static SPACEMIT_CCU_MUX_GATE(twsi7_clk, "twsi7_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI7_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi8_clk, "twsi8_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI8_CLK_RST, ++ 4, 3, 0x7, 0x3, 0x4, ++ 0); ++ ++static const char * const timer_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", ++ "vctcxo_3", "vctcxo_1" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(timers1_clk, "timers1_clk", ++ timer_parent_names, BASE_TYPE_APBC, APBC_TIMERS1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(timers2_clk, ++ "timers2_clk", timer_parent_names, ++ BASE_TYPE_APBC, APBC_TIMERS2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(aib_clk, "aib_clk", "vctcxo_24", ++ BASE_TYPE_APBC, APBC_AIB_CLK_RST, ++ 0x3, 0x3, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(onewire_clk, ++ "onewire_clk", NULL, ++ BASE_TYPE_APBC, APBC_ONEWIRE_CLK_RST, ++ 0x3, 0x3, 0x0, 0); + -+/* Th1520 Fullmask PLL Bypass */ -+static const char * const cpu_pll0_bypass_sels[] = {"cpu_pll0_foutpostdiv", "osc_24m", }; -+static const char * const cpu_pll1_bypass_sels[] = {"cpu_pll1_foutpostdiv", "osc_24m", }; -+static const char * const gmac_pll_bypass_sels[] = {"gmac_pll_foutpostdiv", "osc_24m", }; -+static const char * const video_pll_bypass_sels[] = {"video_pll_foutpostdiv", "osc_24m", }; -+static const char * const tee_pll_bypass_sels[] = {"tee_pll_foutpostdiv", "osc_24m"}; -+static const char * const dpu0_pll_bypass_sels[] = {"dpu0_pll_foutpostdiv", "osc_24m"}; -+static const char * const dpu1_pll_bypass_sels[] = {"dpu1_pll_foutpostdiv", "osc_24m"}; ++static SPACEMIT_CCU_GATE_FACTOR(i2s_sysclk, ++ "i2s_sysclk", "pll1_d8_307p2", ++ BASE_TYPE_MPMU, MPMU_ISCCR, ++ BIT(31), BIT(31), 0x0, ++ 200, 1, 0); ++ ++static SPACEMIT_CCU_GATE_FACTOR(i2s_bclk, ++ "i2s_bclk", "i2s_sysclk", ++ BASE_TYPE_MPMU, MPMU_ISCCR, ++ BIT(29), BIT(29), 0x0, ++ 1, 1, 0); + -+/* th1520 fullmask mux */ -+static const char * const ahb2_cpusys_hclk_sels[] = {"ahb2_cpusys_hclk_out_div", "osc_24m"}; -+static const char * const c910_cclk_i0_sels[] = {"cpu_pll0_foutpostdiv", "osc_24m"}; -+static const char * const c910_cclk_sels[] = {"c910_cclk_i0", "cpu_pll1_foutpostdiv"}; -+static const char * const cfg_axi_aclk_sels[] = {"cfg_axi_aclk_out_div", "osc_24m"}; ++static const char * const sspa_parent_names[] = { ++ "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", ++ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", ++ "pll1_d3072_0p8", "i2s_bclk" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(sspa0_clk, "sspa0_clk", sspa_parent_names, ++ BASE_TYPE_APBC, APBC_SSPA0_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(sspa1_clk, "sspa1_clk", sspa_parent_names, ++ BASE_TYPE_APBC, APBC_SSPA1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dro_clk, "dro_clk", NULL, ++ BASE_TYPE_APBC, APBC_DRO_CLK_RST, ++ 0x1, 0x1, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(ir_clk, "ir_clk", NULL, ++ BASE_TYPE_APBC, APBC_IR_CLK_RST, ++ 0x1, 0x1, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(tsen_clk, "tsen_clk", NULL, ++ BASE_TYPE_APBC, APBC_TSEN_CLK_RST, ++ 0x3, 0x3, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(ipc_ap2aud_clk, "ipc_ap2aud_clk", ++ NULL, BASE_TYPE_APBC, APBC_IPC_AP2AUD_CLK_RST, ++ 0x3, 0x3, 0x0, 0); ++ ++static const char * const can_parent_names[] = { ++ "pll3_20", "pll3_40", "pll3_80" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(can0_clk, "can0_clk", can_parent_names, ++ 
BASE_TYPE_APBC, APBC_CAN0_CLK_RST, ++ 4, 3, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(can0_bus_clk, "can0_bus_clk", NULL, ++ BASE_TYPE_APBC, APBC_CAN0_CLK_RST, ++ BIT(0), BIT(0), 0x0, 0); ++ ++static SPACEMIT_CCU_GATE(wdt_clk, "wdt_clk", "pll1_d96_25p6", ++ BASE_TYPE_MPMU, MPMU_WDTPCR, ++ 0x3, 0x3, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(ripc_clk, "ripc_clk", NULL, ++ BASE_TYPE_MPMU, MPMU_RIPCCR, ++ 0x3, 0x3, 0x0, 0); ++ ++static const char * const jpg_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d6_409p6", "pll1_d5_491p52", ++ "pll1_d3_819p2", "pll1_d2_1228p8", "pll2_d4", "pll2_d3" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(jpg_clk, "jpg_clk", ++ jpg_parent_names, BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, ++ 5, 3, BIT(15), ++ 2, 3, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(jpg_4kafbc_clk, "jpg_4kafbc_clk", ++ NULL, BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, ++ BIT(16), BIT(16), 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(jpg_2kafbc_clk, "jpg_2kafbc_clk", ++ NULL, BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, ++ BIT(17), BIT(17), 0x0, 0); ++ ++static const char * const ccic2phy_parent_names[] = { ++ "pll1_d24_102p4", "pll1_d48_51p2_ap" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ccic2phy_clk, "ccic2phy_clk", ++ ccic2phy_parent_names, ++ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 7, 1, BIT(5), BIT(5), 0x0, ++ 0); ++ ++static const char * const ccic3phy_parent_names[] = { ++ "pll1_d24_102p4", "pll1_d48_51p2_ap" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ccic3phy_clk, "ccic3phy_clk", ++ ccic3phy_parent_names, ++ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 31, 1, BIT(30), BIT(30), 0x0, 0); ++ ++static const char * const csi_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d4_614p4", ++ "pll1_d3_819p2", "pll2_d2", "pll2_d3", "pll2_d4", ++ "pll1_d2_1228p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(csi_clk, "csi_clk", ++ csi_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 20, 3, BIT(15), ++ 16, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const camm_parent_names[] = { ++ "pll1_d8_307p2", "pll2_d5", "pll1_d6_409p6", "vctcxo_24" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(camm0_clk, "camm0_clk", ++ camm_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 23, 4, 8, 2, ++ BIT(28), BIT(28), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(camm1_clk, "camm1_clk", ++ camm_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 23, 4, 8, 2, ++ BIT(6), BIT(6), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(camm2_clk, "camm2_clk", ++ camm_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 23, 4, 8, 2, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const isp_cpp_parent_names[] = { ++ "pll1_d8_307p2", "pll1_d6_409p6" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(isp_cpp_clk, "isp_cpp_clk", ++ isp_cpp_parent_names, ++ BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, ++ 24, 2, 26, 1, ++ BIT(28), BIT(28), 0x0, ++ 0); ++ ++static const char * const isp_bus_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d8_307p2", ++ "pll1_d10_245p76" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(isp_bus_clk, "isp_bus_clk", ++ isp_bus_parent_names, ++ BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, ++ 18, 3, BIT(23), ++ 21, 2, BIT(17), BIT(17), 0x0, ++ 0); ++ ++static const char * const isp_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", ++ "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(isp_clk, "isp_clk", ++ isp_parent_names, BASE_TYPE_APMU, 
APMU_ISP_CLK_RES_CTRL, ++ 4, 3, BIT(7), ++ 8, 2, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static const char * const dpumclk_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", ++ "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV2_FC_MUX_GATE(dpu_mclk, "dpu_mclk", ++ dpumclk_parent_names, BASE_TYPE_APMU, ++ APMU_LCD_CLK_RES_CTRL1, APMU_LCD_CLK_RES_CTRL2, ++ 1, 4, BIT(29), ++ 5, 3, BIT(0), BIT(0), 0x0, ++ 0); ++ ++static const char * const dpuesc_parent_names[] = { ++ "pll1_d48_51p2_ap", "pll1_d52_47p26", "pll1_d96_25p6", ++ "pll1_d32_76p8" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(dpu_esc_clk, "dpu_esc_clk", dpuesc_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ 0, 2, BIT(2), BIT(2), 0x0, ++ 0); ++ ++static const char * const dpubit_parent_names[] = { ++ "pll1_d3_819p2", "pll2_d2", "pll2_d3", "pll1_d2_1228p8", ++ "pll2_d4", "pll2_d5", "pll2_d8", "pll2_d8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(dpu_bit_clk, "dpu_bit_clk", ++ dpubit_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ 17, 3, BIT(31), ++ 20, 3, BIT(16), BIT(16), 0x0, ++ 0); ++ ++static const char * const dpupx_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", ++ "pll1_d8_307p2", "pll2_d7", "pll2_d8" ++}; ++ ++static SPACEMIT_CCU_DIV2_FC_MUX_GATE(dpu_pxclk, "dpu_pxclk", dpupx_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, APMU_LCD_CLK_RES_CTRL2, ++ 17, 4, BIT(30), ++ 21, 3, BIT(16), BIT(16), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_hclk, "dpu_hclk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ BIT(5), BIT(5), 0x0, ++ 0); ++ ++static const char * const dpu_spi_parent_names[] = { ++ "pll1_d8_307p2", "pll1_d6_409p6", "pll1_d10_245p76", ++ "pll1_d11_223p4", "pll1_d13_189", "pll1_d23_106p8", ++ "pll2_d3", "pll2_d5" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(dpu_spi_clk, "dpu_spi_clk", ++ dpu_spi_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ 8, 3, BIT(7), ++ 12, 3, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_hbus_clk, "dpu_spi_hbus_clk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_bus_clk, "dpu_spi_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(5), BIT(5), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_aclk, "dpu_spi_aclk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(6), BIT(6), 0x0, ++ 0); ++ ++static const char * const v2d_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d8_307p2", ++ "pll1_d4_614p4", ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(v2d_clk, "v2d_clk", v2d_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ 9, 3, BIT(28), ++ 12, 2, BIT(8), BIT(8), 0x0, ++ 0); ++ ++static const char * const ccic_4x_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d4_614p4", ++ "pll1_d3_819p2", "pll2_d2", "pll2_d3", "pll2_d4", ++ "pll1_d2_1228p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(ccic_4x_clk, "ccic_4x_clk", ++ ccic_4x_parent_names, ++ BASE_TYPE_APMU, APMU_CCIC_CLK_RES_CTRL, ++ 18, 3, BIT(15), ++ 23, 2, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const ccic1phy_parent_names[] = { ++ "pll1_d24_102p4", "pll1_d48_51p2_ap" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ccic1phy_clk, "ccic1phy_clk", ++ ccic1phy_parent_names, ++ BASE_TYPE_APMU, APMU_CCIC_CLK_RES_CTRL, ++ 7, 1, BIT(5), BIT(5), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(sdh_axi_aclk, "sdh_axi_aclk", NULL, ++ BASE_TYPE_APMU, 
APMU_SDH0_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const sdh01_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d4_614p4", "pll2_d8", "pll2_d5", ++ "pll1_d11_223p4", "pll1_d13_189", "pll1_d23_106p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh0_clk, "sdh0_clk", sdh01_parent_names, ++ BASE_TYPE_APMU, APMU_SDH0_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 5, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh1_clk, "sdh1_clk", sdh01_parent_names, ++ BASE_TYPE_APMU, APMU_SDH1_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 5, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const sdh2_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d4_614p4", "pll2_d8", ++ "pll1_d3_819p2", "pll1_d11_223p4", "pll1_d13_189", ++ "pll1_d23_106p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh2_clk, "sdh2_clk", sdh2_parent_names, ++ BASE_TYPE_APMU, APMU_SDH2_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 5, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(usb_axi_clk, "usb_axi_clk", NULL, ++ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(usb_p1_aclk, "usb_p1_aclk", NULL, ++ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, ++ BIT(5), BIT(5), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(usb30_clk, "usb30_clk", NULL, ++ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, ++ BIT(8), BIT(8), 0x0, ++ 0); ++ ++static const char * const qspi_parent_names[] = { ++ "pll1_d6_409p6", "pll2_d8", "pll1_d8_307p2", ++ "pll1_d10_245p76", "pll1_d11_223p4", "pll1_d23_106p8", ++ "pll1_d5_491p52", "pll1_d13_189" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(qspi_clk, "qspi_clk", qspi_parent_names, ++ BASE_TYPE_APMU, APMU_QSPI_CLK_RES_CTRL, ++ 9, 3, ++ 6, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(qspi_bus_clk, "qspi_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_QSPI_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dma_clk, "dma_clk", NULL, ++ BASE_TYPE_APMU, APMU_DMA_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const aes_parent_names[] = { ++ "pll1_d12_204p8", "pll1_d24_102p4" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(aes_clk, "aes_clk", aes_parent_names, ++ BASE_TYPE_APMU, APMU_AES_CLK_RES_CTRL, ++ 6, 1, BIT(5), BIT(5), 0x0, ++ 0); ++ ++static const char * const vpu_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d5_491p52", "pll1_d3_819p2", ++ "pll1_d6_409p6", "pll3_d6", "pll2_d3", "pll2_d4", "pll2_d5" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(vpu_clk, "vpu_clk", vpu_parent_names, ++ BASE_TYPE_APMU, APMU_VPU_CLK_RES_CTRL, ++ 13, 3, BIT(21), ++ 10, 3, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const gpu_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d5_491p52", "pll1_d3_819p2", "pll1_d6_409p6", ++ "pll3_d6", "pll2_d3", "pll2_d4", "pll2_d5" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(gpu_clk, "gpu_clk", gpu_parent_names, ++ BASE_TYPE_APMU, APMU_GPU_CLK_RES_CTRL, ++ 12, 3, BIT(15), ++ 18, 3, ++ BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const emmc_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d4_614p4", "pll1_d52_47p26", "pll1_d3_819p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(emmc_clk, "emmc_clk", emmc_parent_names, ++ BASE_TYPE_APMU, APMU_PMUA_EM_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 6, 2, ++ 0x18, 0x18, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_GATE(emmc_x_clk, "emmc_x_clk", "pll1_d2_1228p8", ++ BASE_TYPE_APMU, APMU_PMUA_EM_CLK_RES_CTRL, ++ 12, 3, BIT(15), BIT(15), 0x0, ++ 0); ++ ++static const char * const audio_parent_names[] = { ++ 
"pll1_aud_245p7", "pll1_d8_307p2", "pll1_d6_409p6" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(audio_clk, "audio_clk", audio_parent_names, ++ BASE_TYPE_APMU, APMU_AUDIO_CLK_RES_CTRL, ++ 4, 3, BIT(15), ++ 7, 3, ++ BIT(12), BIT(12), 0x0, ++ 0); ++ ++static const char * const hdmi_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(hdmi_mclk, "hdmi_mclk", hdmi_parent_names, ++ BASE_TYPE_APMU, APMU_HDMI_CLK_RES_CTRL, ++ 1, 4, BIT(29), ++ 5, 3, ++ BIT(0), BIT(0), 0x0, ++ 0); ++ ++static const char * const cci550_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d3_819p2", "pll2_d3" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX(cci550_clk, "cci550_clk", cci550_parent_names, ++ BASE_TYPE_APMU, APMU_CCI550_CLK_CTRL, ++ 8, 3, BIT(12), ++ 0, 2, ++ 0); ++ ++static const char * const pmua_aclk_parent_names[] = { ++ "pll1_d10_245p76", "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX(pmua_aclk, "pmua_aclk", pmua_aclk_parent_names, ++ BASE_TYPE_APMU, APMU_ACLK_CLK_CTRL, ++ 1, 2, BIT(4), ++ 0, 1, ++ 0); ++ ++static const char * const cpu_c0_hi_parent_names[] = { ++ "pll3_d2", "pll3_d1" ++}; ++ ++static SPACEMIT_CCU_MUX(cpu_c0_hi_clk, "cpu_c0_hi_clk", cpu_c0_hi_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ 13, 1, ++ 0); ++ ++static const char * const cpu_c0_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d3_819p2", "pll1_d6_409p6", ++ "pll1_d5_491p52", "pll1_d2_1228p8", "pll3_d3", ++ "pll2_d3", "cpu_c0_hi_clk" ++}; ++ ++static SPACEMIT_CCU_MUX_FC(cpu_c0_core_clk, "cpu_c0_core_clk", ++ cpu_c0_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ BIT(12), ++ 0, 3, ++ 0); ++ ++static SPACEMIT_CCU_DIV(cpu_c0_ace_clk, "cpu_c0_ace_clk", "cpu_c0_core_clk", ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ 6, 3, ++ 0); ++ ++static SPACEMIT_CCU_DIV(cpu_c0_tcm_clk, "cpu_c0_tcm_clk", "cpu_c0_core_clk", ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ 9, 3, ++ 0); ++ ++static const char * const cpu_c1_hi_parent_names[] = { ++ "pll3_d2", "pll3_d1" ++}; ++ ++static SPACEMIT_CCU_MUX(cpu_c1_hi_clk, "cpu_c1_hi_clk", cpu_c1_hi_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, ++ 13, 1, ++ 0); ++ ++static const char * const cpu_c1_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d3_819p2", "pll1_d6_409p6", ++ "pll1_d5_491p52", "pll1_d2_1228p8", "pll3_d3", ++ "pll2_d3", "cpu_c1_hi_clk" ++}; ++ ++static SPACEMIT_CCU_MUX_FC(cpu_c1_pclk, "cpu_c1_pclk", cpu_c1_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, ++ BIT(12), ++ 0, 3, ++ 0); ++ ++static SPACEMIT_CCU_DIV(cpu_c1_ace_clk, "cpu_c1_ace_clk", "cpu_c1_pclk", ++ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, ++ 6, 3, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(pcie0_clk, "pcie0_clk", NULL, ++ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_0, ++ 0x7, 0x7, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(pcie1_clk, "pcie1_clk", NULL, ++ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_1, ++ 0x7, 0x7, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(pcie2_clk, "pcie2_clk", NULL, ++ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_2, ++ 0x7, 0x7, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(emac0_bus_clk, "emac0_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_EMAC0_CLK_RES_CTRL, ++ BIT(0), BIT(0), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(emac0_ptp_clk, "emac0_ptp_clk", "pll2_d6", ++ BASE_TYPE_APMU, APMU_EMAC0_CLK_RES_CTRL, ++ BIT(15), BIT(15), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(emac1_bus_clk, "emac1_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_EMAC1_CLK_RES_CTRL, ++ BIT(0), BIT(0), 0x0, 
++ 0); ++ ++static SPACEMIT_CCU_GATE(emac1_ptp_clk, "emac1_ptp_clk", "pll2_d6", ++ BASE_TYPE_APMU, APMU_EMAC1_CLK_RES_CTRL, ++ BIT(15), BIT(15), 0x0, ++ 0); ++ ++static const char * const uart1_sec_parent_names[] = { ++ "pll1_m3d128_57p6", "slow_uart1_14p74", "slow_uart2_48" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(uart1_sec_clk, "uart1_sec_clk", ++ uart1_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_UART1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const ssp2_sec_parent_names[] = { ++ "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", ++ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", ++ "pll1_d3072_0p8" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ssp2_sec_clk, "ssp2_sec_clk", ++ ssp2_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_SSP2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const twsi3_sec_parent_names[] = { ++ "pll1_d78_31p5", "pll1_d48_51p2", "pll1_d40_61p44" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(twsi3_sec_clk, "twsi3_sec_clk", ++ twsi3_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_TWSI3_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(rtc_sec_clk, "rtc_sec_clk", "clk_32k", ++ BASE_TYPE_APBC2, APBC2_RTC_CLK_RST, ++ 0x83, 0x83, 0x0, 0); ++ ++static const char * const timer_sec_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", "vctcxo_3", "vctcxo_1" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(timers0_sec_clk, "timers0_sec_clk", ++ timer_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_TIMERS0_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const kpc_sec_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", "vctcxo_3", "vctcxo_1" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(kpc_sec_clk, "kpc_sec_clk", kpc_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_KPC_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(gpio_sec_clk, "gpio_sec_clk", "vctcxo_24", ++ BASE_TYPE_APBC2, APBC2_GPIO_CLK_RST, ++ 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const apb_parent_names[] = { ++ "pll1_d96_25p6", "pll1_d48_51p2", "pll1_d96_25p6", "pll1_d24_102p4" ++}; ++ ++static SPACEMIT_CCU_MUX(apb_clk, "apb_clk", apb_parent_names, ++ BASE_TYPE_MPMU, MPMU_APBCSCR, ++ 0, 2, 0); ++ ++static const char * const rhdmi_audio_parent_names[] = { ++ "pll1_aud_24p5", "pll1_aud_245p7" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rhdmi_audio_clk, "rhdmi_audio_clk", ++ rhdmi_audio_parent_names, ++ BASE_TYPE_RCPU, RCPU_HDMI_CLK_RST, ++ 4, 11, 16, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static const char * const rcan_parent_names[] = { ++ "pll3_20", "pll3_40", "pll3_80" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rcan_clk, "rcan_clk", rcan_parent_names, ++ BASE_TYPE_RCPU, RCPU_CAN_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(rcan_bus_clk, "rcan_bus_clk", NULL, ++ BASE_TYPE_RCPU, RCPU_CAN_CLK_RST, ++ BIT(2), BIT(2), 0x0, 0); ++ ++static const char * const rpwm_parent_names[] = { ++ "pll1_aud_245p7", "pll1_aud_24p5" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm0_clk, "rpwm0_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM0_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm1_clk, "rpwm1_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM1_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm2_clk, "rpwm2_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM2_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm3_clk, "rpwm3_clk", 
rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM3_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm4_clk, "rpwm4_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM4_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm5_clk, "rpwm5_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM5_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm6_clk, "rpwm6_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM6_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm7_clk, "rpwm7_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM7_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm8_clk, "rpwm8_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM8_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm9_clk, "rpwm9_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM9_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static const char * const ri2c_parent_names[] = { ++ "pll1_d40_61p44", "pll1_d96_25p6", "pll1_d192_12p8", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(ri2c0_clk, "ri2c0_clk", ri2c_parent_names, ++ BASE_TYPE_RCPU, RCPU_I2C0_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static const char * const rssp0_parent_names[] = { ++ "pll1_d40_61p44", "pll1_d96_25p6", "pll1_d192_12p8", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rssp0_clk, "rssp0_clk", rssp0_parent_names, ++ BASE_TYPE_RCPU, RCPU_SSP0_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(rir_clk, "rir_clk", NULL, ++ BASE_TYPE_RCPU, RCPU_IR_CLK_RST, ++ BIT(2), BIT(2), 0x0, ++ 0); ++ ++static const char * const ruart0_parent_names[] = { ++ "pll1_aud_24p5", "pll1_aud_245p7", "vctcxo_24", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(ruart0_clk, "ruart0_clk", ruart0_parent_names, ++ BASE_TYPE_RCPU, RCPU_UART0_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static const char * const ruart1_parent_names[] = { ++ "pll1_aud_24p5", "pll1_aud_245p7", "vctcxo_24", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(ruart1_clk, "ruart1_clk", ruart1_parent_names, ++ BASE_TYPE_RCPU, RCPU_UART1_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static struct clk_hw_onecell_data spacemit_k1x_hw_clks = { ++ .hws = { ++ [CLK_PLL2] = &pll2.common.hw, ++ [CLK_PLL3] = &pll3.common.hw, ++ [CLK_PLL1_D2] = &pll1_d2.common.hw, ++ [CLK_PLL1_D3] = &pll1_d3.common.hw, ++ [CLK_PLL1_D4] = &pll1_d4.common.hw, ++ [CLK_PLL1_D5] = &pll1_d5.common.hw, ++ [CLK_PLL1_D6] = &pll1_d6.common.hw, ++ [CLK_PLL1_D7] = &pll1_d7.common.hw, ++ [CLK_PLL1_D8] = &pll1_d8.common.hw, ++ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw, ++ [CLK_PLL1_D13] = &pll1_d13_189.common.hw, ++ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw, ++ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw, ++ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw, ++ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw, ++ [CLK_PLL2_D1] = &pll2_d1.common.hw, ++ [CLK_PLL2_D2] = &pll2_d2.common.hw, ++ [CLK_PLL2_D3] = &pll2_d3.common.hw, ++ [CLK_PLL2_D4] = &pll2_d4.common.hw, ++ [CLK_PLL2_D5] = &pll2_d5.common.hw, ++ [CLK_PLL2_D6] = &pll2_d6.common.hw, ++ [CLK_PLL2_D7] = &pll2_d7.common.hw, ++ [CLK_PLL2_D8] = &pll2_d8.common.hw, ++ [CLK_PLL3_D1] = &pll3_d1.common.hw, ++ [CLK_PLL3_D2] = &pll3_d2.common.hw, ++ [CLK_PLL3_D3] = &pll3_d3.common.hw, ++ [CLK_PLL3_D4] = 
&pll3_d4.common.hw, ++ [CLK_PLL3_D5] = &pll3_d5.common.hw, ++ [CLK_PLL3_D6] = &pll3_d6.common.hw, ++ [CLK_PLL3_D7] = &pll3_d7.common.hw, ++ [CLK_PLL3_D8] = &pll3_d8.common.hw, ++ [CLK_PLL3_80] = &pll3_80.common.hw, ++ [CLK_PLL3_40] = &pll3_40.common.hw, ++ [CLK_PLL3_20] = &pll3_20.common.hw, ++ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw, ++ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw, ++ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw, ++ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw, ++ [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw, ++ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw, ++ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw, ++ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw, ++ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw, ++ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw, ++ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw, ++ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw, ++ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw, ++ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw, ++ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw, ++ [CLK_PLL1_351] = &pll1_d7_351p08.common.hw, ++ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw, ++ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw, ++ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw, ++ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw, ++ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw, ++ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw, ++ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw, ++ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw, ++ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw, ++ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw, ++ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw, ++ [CLK_UART1] = &uart1_clk.common.hw, ++ [CLK_UART2] = &uart2_clk.common.hw, ++ [CLK_UART3] = &uart3_clk.common.hw, ++ [CLK_UART4] = &uart4_clk.common.hw, ++ [CLK_UART5] = &uart5_clk.common.hw, ++ [CLK_UART6] = &uart6_clk.common.hw, ++ [CLK_UART7] = &uart7_clk.common.hw, ++ [CLK_UART8] = &uart8_clk.common.hw, ++ [CLK_UART9] = &uart9_clk.common.hw, ++ [CLK_GPIO] = &gpio_clk.common.hw, ++ [CLK_PWM0] = &pwm0_clk.common.hw, ++ [CLK_PWM1] = &pwm1_clk.common.hw, ++ [CLK_PWM2] = &pwm2_clk.common.hw, ++ [CLK_PWM3] = &pwm3_clk.common.hw, ++ [CLK_PWM4] = &pwm4_clk.common.hw, ++ [CLK_PWM5] = &pwm5_clk.common.hw, ++ [CLK_PWM6] = &pwm6_clk.common.hw, ++ [CLK_PWM7] = &pwm7_clk.common.hw, ++ [CLK_PWM8] = &pwm8_clk.common.hw, ++ [CLK_PWM9] = &pwm9_clk.common.hw, ++ [CLK_PWM10] = &pwm10_clk.common.hw, ++ [CLK_PWM11] = &pwm11_clk.common.hw, ++ [CLK_PWM12] = &pwm12_clk.common.hw, ++ [CLK_PWM13] = &pwm13_clk.common.hw, ++ [CLK_PWM14] = &pwm14_clk.common.hw, ++ [CLK_PWM15] = &pwm15_clk.common.hw, ++ [CLK_PWM16] = &pwm16_clk.common.hw, ++ [CLK_PWM17] = &pwm17_clk.common.hw, ++ [CLK_PWM18] = &pwm18_clk.common.hw, ++ [CLK_PWM19] = &pwm19_clk.common.hw, ++ [CLK_SSP3] = &ssp3_clk.common.hw, ++ [CLK_RTC] = &rtc_clk.common.hw, ++ [CLK_TWSI0] = &twsi0_clk.common.hw, ++ [CLK_TWSI1] = &twsi1_clk.common.hw, ++ [CLK_TWSI2] = &twsi2_clk.common.hw, ++ [CLK_TWSI4] = &twsi4_clk.common.hw, ++ [CLK_TWSI5] = &twsi5_clk.common.hw, ++ [CLK_TWSI6] = &twsi6_clk.common.hw, ++ [CLK_TWSI7] = &twsi7_clk.common.hw, ++ [CLK_TWSI8] = &twsi8_clk.common.hw, ++ [CLK_TIMERS1] = &timers1_clk.common.hw, ++ [CLK_TIMERS2] = &timers2_clk.common.hw, ++ [CLK_AIB] = &aib_clk.common.hw, ++ [CLK_ONEWIRE] = &onewire_clk.common.hw, ++ [CLK_SSPA0] = &sspa0_clk.common.hw, ++ [CLK_SSPA1] = &sspa1_clk.common.hw, ++ [CLK_DRO] = &dro_clk.common.hw, ++ [CLK_IR] = &ir_clk.common.hw, ++ [CLK_TSEN] = &tsen_clk.common.hw, ++ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw, ++ [CLK_CAN0] = 
&can0_clk.common.hw, ++ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw, ++ [CLK_WDT] = &wdt_clk.common.hw, ++ [CLK_RIPC] = &ripc_clk.common.hw, ++ [CLK_JPG] = &jpg_clk.common.hw, ++ [CLK_JPF_4KAFBC] = &jpg_4kafbc_clk.common.hw, ++ [CLK_JPF_2KAFBC] = &jpg_2kafbc_clk.common.hw, ++ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw, ++ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw, ++ [CLK_CSI] = &csi_clk.common.hw, ++ [CLK_CAMM0] = &camm0_clk.common.hw, ++ [CLK_CAMM1] = &camm1_clk.common.hw, ++ [CLK_CAMM2] = &camm2_clk.common.hw, ++ [CLK_ISP_CPP] = &isp_cpp_clk.common.hw, ++ [CLK_ISP_BUS] = &isp_bus_clk.common.hw, ++ [CLK_ISP] = &isp_clk.common.hw, ++ [CLK_DPU_MCLK] = &dpu_mclk.common.hw, ++ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw, ++ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw, ++ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw, ++ [CLK_DPU_HCLK] = &dpu_hclk.common.hw, ++ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw, ++ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw, ++ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw, ++ [CLK_SPU_SPI_ACLK] = &dpu_spi_aclk.common.hw, ++ [CLK_V2D] = &v2d_clk.common.hw, ++ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw, ++ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw, ++ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw, ++ [CLK_SDH0] = &sdh0_clk.common.hw, ++ [CLK_SDH1] = &sdh1_clk.common.hw, ++ [CLK_SDH2] = &sdh2_clk.common.hw, ++ [CLK_USB_P1] = &usb_p1_aclk.common.hw, ++ [CLK_USB_AXI] = &usb_axi_clk.common.hw, ++ [CLK_USB30] = &usb30_clk.common.hw, ++ [CLK_QSPI] = &qspi_clk.common.hw, ++ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw, ++ [CLK_DMA] = &dma_clk.common.hw, ++ [CLK_AES] = &aes_clk.common.hw, ++ [CLK_VPU] = &vpu_clk.common.hw, ++ [CLK_GPU] = &gpu_clk.common.hw, ++ [CLK_EMMC] = &emmc_clk.common.hw, ++ [CLK_EMMC_X] = &emmc_x_clk.common.hw, ++ [CLK_AUDIO] = &audio_clk.common.hw, ++ [CLK_HDMI] = &hdmi_mclk.common.hw, ++ [CLK_CCI550] = &cci550_clk.common.hw, ++ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw, ++ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw, ++ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw, ++ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw, ++ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw, ++ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw, ++ [CLK_CPU_C1_CORE] = &cpu_c1_pclk.common.hw, ++ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw, ++ [CLK_PCIE0] = &pcie0_clk.common.hw, ++ [CLK_PCIE1] = &pcie1_clk.common.hw, ++ [CLK_PCIE2] = &pcie2_clk.common.hw, ++ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw, ++ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw, ++ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw, ++ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw, ++ [CLK_SEC_UART1] = &uart1_sec_clk.common.hw, ++ [CLK_SEC_SSP2] = &ssp2_sec_clk.common.hw, ++ [CLK_SEC_TWSI3] = &twsi3_sec_clk.common.hw, ++ [CLK_SEC_RTC] = &rtc_sec_clk.common.hw, ++ [CLK_SEC_TIMERS0] = &timers0_sec_clk.common.hw, ++ [CLK_SEC_KPC] = &kpc_sec_clk.common.hw, ++ [CLK_SEC_GPIO] = &gpio_sec_clk.common.hw, ++ [CLK_APB] = &apb_clk.common.hw, ++ [CLK_SLOW_UART] = &slow_uart.common.hw, ++ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw, ++ [CLK_I2S_BCLK] = &i2s_bclk.common.hw, ++ [CLK_RCPU_HDMIAUDIO] = &rhdmi_audio_clk.common.hw, ++ [CLK_RCPU_CAN] = &rcan_clk.common.hw, ++ [CLK_RCPU_CAN_BUS] = &rcan_bus_clk.common.hw, ++ [CLK_RCPU_I2C0] = &ri2c0_clk.common.hw, ++ [CLK_RCPU_SSP0] = &rssp0_clk.common.hw, ++ [CLK_RCPU_IR] = &rir_clk.common.hw, ++ [CLK_RCPU_UART0] = &ruart0_clk.common.hw, ++ [CLK_RCPU_UART1] = &ruart1_clk.common.hw, ++ [CLK_DPLL1] = &dpll1.common.hw, ++ [CLK_DPLL2] = &dpll2.common.hw, ++ [CLK_DFC_LVL0] = &dfc_lvl0.common.hw, ++ [CLK_DFC_LVL1] = &dfc_lvl1.common.hw, ++ 
[CLK_DFC_LVL2] = &dfc_lvl2.common.hw, ++ [CLK_DFC_LVL3] = &dfc_lvl3.common.hw, ++ [CLK_DFC_LVL4] = &dfc_lvl4.common.hw, ++ [CLK_DFC_LVL5] = &dfc_lvl5.common.hw, ++ [CLK_DFC_LVL6] = &dfc_lvl6.common.hw, ++ [CLK_DFC_LVL7] = &dfc_lvl7.common.hw, ++ [CLK_DDR] = &ddr.common.hw, ++ [CLK_RCPU2_PWM0] = &rpwm0_clk.common.hw, ++ [CLK_RCPU2_PWM1] = &rpwm1_clk.common.hw, ++ [CLK_RCPU2_PWM2] = &rpwm2_clk.common.hw, ++ [CLK_RCPU2_PWM3] = &rpwm3_clk.common.hw, ++ [CLK_RCPU2_PWM4] = &rpwm4_clk.common.hw, ++ [CLK_RCPU2_PWM5] = &rpwm5_clk.common.hw, ++ [CLK_RCPU2_PWM6] = &rpwm6_clk.common.hw, ++ [CLK_RCPU2_PWM7] = &rpwm7_clk.common.hw, ++ [CLK_RCPU2_PWM8] = &rpwm8_clk.common.hw, ++ [CLK_RCPU2_PWM9] = &rpwm9_clk.common.hw, ++ }, ++ .num = CLK_MAX_NO, ++}; ++ ++static struct clk_hw_table bootup_enable_clk_table[] = { ++ {"pll1_d8_307p2", CLK_PLL1_307P2}, ++ {"pll1_d6_409p6", CLK_PLL1_409P6}, ++ {"pll1_d5_491p52", CLK_PLL1_491}, ++ {"pll1_d4_614p4", CLK_PLL1_614}, ++ {"pll1_d3_819p2", CLK_PLL1_819}, ++ {"pll1_d2_1228p8", CLK_PLL1_1228}, ++ {"pll1_d10_245p76", CLK_PLL1_245P76}, ++ {"pll1_d48_51p2", CLK_PLL1_51P2}, ++ {"pll1_d48_51p2_ap", CLK_PLL1_51P2_AP}, ++ {"pll1_d96_25p6", CLK_PLL1_25P6}, ++ {"pll3_d1", CLK_PLL3_D1}, ++ {"pll3_d2", CLK_PLL3_D2}, ++ {"pll3_d3", CLK_PLL3_D3}, ++ {"pll2_d3", CLK_PLL2_D3}, ++ {"apb_clk", CLK_APB}, ++ {"pmua_aclk", CLK_PMUA_ACLK}, ++ {"dma_clk", CLK_DMA}, ++}; ++ ++void spacemit_clocks_enable(struct clk_hw_table *tbl, int tbl_size) ++{ ++ int i; ++ struct clk *clk; ++ struct clk_hw *hw_clk; ++ ++ for (i = 0; i < tbl_size; i++) { ++ hw_clk = spacemit_k1x_hw_clks.hws[tbl[i].clk_hw_id]; ++ clk = clk_hw_get_clk(hw_clk, tbl[i].name); ++ if (!IS_ERR_OR_NULL(clk)) ++ clk_prepare_enable(clk); ++ else ++ pr_err("%s : can't find clk %s\n", ++ __func__, tbl[i].name); ++ } ++} ++ ++unsigned long spacemit_k1x_ddr_freq_tbl[MAX_FREQ_LV + 1] = {0}; ++ ++void spacemit_fill_ddr_freq_tbl(void) ++{ ++ int i; ++ struct clk *clk; ++ struct clk_hw *hw_clk; ++ ++ for (i = 0; i < ARRAY_SIZE(spacemit_k1x_ddr_freq_tbl); i++) { ++ hw_clk = spacemit_k1x_hw_clks.hws[CLK_DFC_LVL0 + i]; ++ clk = clk_hw_get_clk(hw_clk, ddr_clk_parents[i]); ++ ++ if (!IS_ERR_OR_NULL(clk)) ++ spacemit_k1x_ddr_freq_tbl[i] = clk_get_rate(clk); ++ else ++ pr_err("%s : can't find clk %s\n", ++ __func__, ddr_clk_parents[i]); ++ } ++} ++ ++int ccu_common_init(struct clk_hw *hw, struct spacemit_k1x_clk *clk_info) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ struct ccu_pll *pll = hw_to_ccu_pll(hw); ++ ++ if (!common) ++ return -1; ++ ++ common->lock = &g_cru_lock; ++ ++ switch (common->base_type) { ++ case BASE_TYPE_MPMU: ++ common->base = clk_info->mpmu_base; ++ break; ++ case BASE_TYPE_APMU: ++ common->base = clk_info->apmu_base; ++ break; ++ case BASE_TYPE_APBC: ++ common->base = clk_info->apbc_base; ++ break; ++ case BASE_TYPE_APBS: ++ common->base = clk_info->apbs_base; ++ break; ++ case BASE_TYPE_CIU: ++ common->base = clk_info->ciu_base; ++ break; ++ case BASE_TYPE_DCIU: ++ common->base = clk_info->dciu_base; ++ break; ++ case BASE_TYPE_DDRC: ++ common->base = clk_info->ddrc_base; ++ break; ++ case BASE_TYPE_AUDC: ++ common->base = clk_info->audio_ctrl_base; ++ break; ++ case BASE_TYPE_APBC2: ++ common->base = clk_info->apbc2_base; ++ break; ++ case BASE_TYPE_RCPU: ++ common->base = clk_info->rcpu_base; ++ break; ++ case BASE_TYPE_RCPU2: ++ common->base = clk_info->rcpu2_base; ++ break; ++ default: ++ common->base = clk_info->apbc_base; ++ break; ++ } ++ ++ if (common->is_pll) ++ pll->pll.lock_base = 
clk_info->mpmu_base; ++ ++ return 0; ++} ++ ++int spacemit_ccu_probe(struct device_node *node, ++ struct spacemit_k1x_clk *clk_info, ++ struct clk_hw_onecell_data *hw_clks) ++{ ++ int i, ret; ++ ++ for (i = 0; i < hw_clks->num; i++) { ++ struct clk_hw *hw = hw_clks->hws[i]; ++ const char *name; ++ ++ if (!hw) ++ continue; ++ if (!hw->init) ++ continue; ++ ++ ccu_common_init(hw, clk_info); ++ name = hw->init->name; ++ ++ ret = of_clk_hw_register(node, hw); ++ if (ret) { ++ pr_err("Couldn't register clock %d - %s\n", i, name); ++ goto err_clk_unreg; ++ } ++ } ++ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, hw_clks); ++ if (ret) ++ goto err_clk_unreg; ++ ++ spacemit_clocks_enable(bootup_enable_clk_table, ++ ARRAY_SIZE(bootup_enable_clk_table)); ++ spacemit_fill_ddr_freq_tbl(); ++ ++ return 0; ++ ++err_clk_unreg: ++ while (--i >= 0) { ++ struct clk_hw *hw = hw_clks->hws[i]; ++ ++ if (!hw) ++ continue; ++ clk_hw_unregister(hw); ++ } ++ ++ return ret; ++} ++ ++static void spacemit_k1x_ccu_probe(struct device_node *np) ++{ ++ int ret; ++ struct spacemit_k1x_clk *clk_info; ++ struct clk_hw_onecell_data *hw_clks = &spacemit_k1x_hw_clks; ++ ++ if (of_device_is_compatible(np, "spacemit,k1x-clock")) { ++ clk_info = &k1x_clock_controller; ++ ++ clk_info->mpmu_base = of_iomap(np, 0); ++ if (!clk_info->mpmu_base) { ++ pr_err("failed to map mpmu registers\n"); ++ goto out; ++ } ++ ++ clk_info->apmu_base = of_iomap(np, 1); ++ if (!clk_info->apmu_base) { ++ pr_err("failed to map apmu registers\n"); ++ goto out; ++ } ++ ++ clk_info->apbc_base = of_iomap(np, 2); ++ if (!clk_info->apbc_base) { ++ pr_err("failed to map apbc registers\n"); ++ goto out; ++ } ++ ++ clk_info->apbs_base = of_iomap(np, 3); ++ if (!clk_info->apbs_base) { ++ pr_err("failed to map apbs registers\n"); ++ goto out; ++ } ++ ++ clk_info->ciu_base = of_iomap(np, 4); ++ if (!clk_info->ciu_base) { ++ pr_err("failed to map ciu registers\n"); ++ goto out; ++ } ++ ++ clk_info->dciu_base = of_iomap(np, 5); ++ if (!clk_info->dciu_base) { ++ pr_err("failed to map dragon ciu registers\n"); ++ goto out; ++ } ++ ++ clk_info->ddrc_base = of_iomap(np, 6); ++ if (!clk_info->ddrc_base) { ++ pr_err("failed to map ddrc registers\n"); ++ goto out; ++ } ++ ++ clk_info->apbc2_base = of_iomap(np, 7); ++ if (!clk_info->apbc2_base) { ++ pr_err("failed to map apbc2 registers\n"); ++ goto out; ++ } ++ ++ clk_info->rcpu_base = of_iomap(np, 8); ++ if (!clk_info->rcpu_base) { ++ pr_err("failed to map rcpu registers\n"); ++ goto out; ++ } ++ ++ clk_info->rcpu2_base = of_iomap(np, 9); ++ if (!clk_info->rcpu2_base) { ++ pr_err("failed to map rcpu2 registers\n"); ++ goto out; ++ } ++ } else { ++ pr_err("not spacemit,k1x-clock\n"); ++ goto out; ++ } ++ ret = spacemit_ccu_probe(np, clk_info, hw_clks); ++ if (ret) ++ return; ++out: ++ return; ++} ++ ++void *spacemit_get_ddr_freq_tbl(void) ++{ ++ return spacemit_k1x_ddr_freq_tbl; ++} ++EXPORT_SYMBOL_GPL(spacemit_get_ddr_freq_tbl); ++ ++u32 spacemit_get_ddr_freq_level(void) ++{ ++ u32 ddr_freq_lvl = 0; ++ struct clk_hw *hw = spacemit_k1x_hw_clks.hws[CLK_DDR]; ++ ++ ddr_freq_lvl = clk_hw_get_parent_index(hw); ++ ++ return ddr_freq_lvl; ++} ++EXPORT_SYMBOL_GPL(spacemit_get_ddr_freq_level); ++ ++int spacemit_set_ddr_freq_level(u32 level) ++{ ++ int ret = 0; ++ struct clk_hw *hw = spacemit_k1x_hw_clks.hws[CLK_DDR]; ++ ++ if (level < 0 || level > MAX_FREQ_LV) ++ return -EINVAL; ++ ++ ret = clk_hw_set_parent(hw, clk_hw_get_parent_by_index(hw, level)); ++ if (ret) ++ pr_err("%s : set ddr freq fail\n", __func__); ++ ++ 
return 0; ++} ++EXPORT_SYMBOL_GPL(spacemit_set_ddr_freq_level); ++ ++CLK_OF_DECLARE(k1x_clock, "spacemit,k1x-clock", spacemit_k1x_ccu_probe); ++ +diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.h b/drivers/clk/spacemit/ccu-spacemit-k1x.h +new file mode 100644 +index 000000000000..2662b9e40400 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu-spacemit-k1x.h +@@ -0,0 +1,81 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. */ ++ ++#ifndef _CCU_SPACEMIT_K1X_H_ ++#define _CCU_SPACEMIT_K1X_H_ ++ ++#include ++#include ++ ++enum ccu_base_type { ++ BASE_TYPE_MPMU = 0, ++ BASE_TYPE_APMU = 1, ++ BASE_TYPE_APBC = 2, ++ BASE_TYPE_APBS = 3, ++ BASE_TYPE_CIU = 4, ++ BASE_TYPE_DCIU = 5, ++ BASE_TYPE_DDRC = 6, ++ BASE_TYPE_AUDC = 7, ++ BASE_TYPE_APBC2 = 8, ++ BASE_TYPE_RCPU = 9, ++ BASE_TYPE_RCPU2 = 10, ++}; ++ ++enum { ++ CLK_DIV_TYPE_1REG_NOFC_V1 = 0, ++ CLK_DIV_TYPE_1REG_FC_V2, ++ CLK_DIV_TYPE_2REG_NOFC_V3, ++ CLK_DIV_TYPE_2REG_FC_V4, ++ CLK_DIV_TYPE_1REG_FC_DIV_V5, ++ CLK_DIV_TYPE_1REG_FC_MUX_V6, ++}; ++ ++struct ccu_common { ++ void __iomem *base; ++ enum ccu_base_type base_type; ++ u32 reg_type; ++ u32 reg_ctrl; ++ u32 reg_sel; ++ u32 reg_xtc; ++ u32 fc; ++ bool is_pll; ++ const char *name; ++ const struct clk_ops *ops; ++ const char * const *parent_names; ++ u8 num_parents; ++ unsigned long flags; ++ spinlock_t *lock; ++ struct clk_hw hw; ++}; ++ ++struct spacemit_k1x_clk { ++ void __iomem *mpmu_base; ++ void __iomem *apmu_base; ++ void __iomem *apbc_base; ++ void __iomem *apbs_base; ++ void __iomem *ciu_base; ++ void __iomem *dciu_base; ++ void __iomem *ddrc_base; ++ void __iomem *audio_ctrl_base; ++ void __iomem *apbc2_base; ++ void __iomem *rcpu_base; ++ void __iomem *rcpu2_base; ++}; ++ ++struct clk_hw_table { ++ char *name; ++ u32 clk_hw_id; ++}; ++ ++extern spinlock_t g_cru_lock; ++ ++static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw) ++{ ++ return container_of(hw, struct ccu_common, hw); ++} ++ ++int spacemit_ccu_probe(struct device_node *node, ++ struct spacemit_k1x_clk *clk_info, ++ struct clk_hw_onecell_data *desc); ++ ++#endif /* _CCU_SPACEMIT_K1X_H_ */ +diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c +new file mode 100644 +index 000000000000..a23d9dad8e32 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_ddn.c +@@ -0,0 +1,161 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit clock type ddn ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ * ++ */ ++ ++#include ++#include ++#include "ccu_ddn.h" ++ ++/* ++ * It is M/N clock ++ * ++ * Fout from synthesizer can be given from two equations: ++ * numerator/denominator = Fin / (Fout * factor) ++ */ ++ ++static void ccu_ddn_disable(struct clk_hw *hw) ++{ ++ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); ++ struct ccu_common *common = &ddn->common; ++ unsigned long flags; ++ u32 reg; ++ ++ if (!ddn->gate) ++ return; ++ ++ spin_lock_irqsave(common->lock, flags); ++ reg = readl(common->base + common->reg_sel); ++ writel(reg & ~ddn->gate, common->base + common->reg_sel); ++ spin_unlock_irqrestore(common->lock, flags); ++} ++ ++static int ccu_ddn_enable(struct clk_hw *hw) ++{ ++ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); ++ struct ccu_common *common = &ddn->common; ++ unsigned long flags; ++ u32 reg; ++ ++ if (!ddn->gate) ++ return 0; ++ ++ spin_lock_irqsave(common->lock, flags); ++ reg = readl(common->base + common->reg_sel); ++ writel(reg | ddn->gate, common->base + common->reg_sel); ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ return 0; ++} ++ ++static int ccu_ddn_is_enabled(struct clk_hw *hw) ++{ ++ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); ++ struct ccu_common *common = &ddn->common; ++ ++ if (!ddn->gate) ++ return 1; ++ ++ return readl(common->base + common->reg_sel) & ddn->gate; ++} ++ ++static long clk_ddn_round_rate(struct clk_hw *hw, unsigned long drate, ++ unsigned long *prate) ++{ ++ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); ++ struct ccu_ddn_config *params = &ddn->ddn; ++ unsigned long rate = 0, prev_rate; ++ unsigned long result; ++ int i; ++ ++ for (i = 0; i < params->tbl_size; i++) { ++ prev_rate = rate; ++ rate = (((*prate / 10000) * params->tbl[i].den) / ++ (params->tbl[i].num * params->info->factor)) * 10000; ++ if (rate > drate) ++ break; ++ } ++ ++ if (i == 0 || i == params->tbl_size) { ++ result = rate; ++ } else { ++ if ((drate - prev_rate) > (rate - drate)) ++ result = rate; ++ else ++ result = prev_rate; ++ } ++ return result; ++} ++ ++static unsigned long clk_ddn_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); ++ struct ccu_ddn_config *params = &ddn->ddn; ++ unsigned int val, num, den; ++ unsigned long rate; ++ ++ val = readl(ddn->common.base + ddn->common.reg_ctrl); ++ num = (val >> params->info->num_shift) & params->info->num_mask; ++ den = (val >> params->info->den_shift) & params->info->den_mask; ++ if (!den) ++ return 0; ++ ++ rate = (((parent_rate / 10000) * den) / ++ (num * params->info->factor)) * 10000; ++ ++ return rate; ++} ++ ++/* Configures new clock rate*/ ++static int clk_ddn_set_rate(struct clk_hw *hw, unsigned long drate, ++ unsigned long prate) ++{ ++ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); ++ struct ccu_ddn_config *params = &ddn->ddn; ++ int i; ++ unsigned long val; ++ unsigned long prev_rate, rate = 0; ++ unsigned long flags = 0; ++ ++ for (i = 0; i < params->tbl_size; i++) { ++ prev_rate = rate; ++ rate = (((prate / 10000) * params->tbl[i].den) / ++ (params->tbl[i].num * params->info->factor)) * 10000; ++ if (rate > drate) ++ break; ++ } ++ ++ if (i > 0) ++ i--; ++ ++ if (ddn->common.lock) ++ spin_lock_irqsave(ddn->common.lock, flags); ++ ++ val = readl(ddn->common.base + ddn->common.reg_ctrl); ++ val &= ~(params->info->num_mask << params->info->num_shift); ++ val |= (params->tbl[i].num & params->info->num_mask) ++ << params->info->num_shift; ++ val &= ~(params->info->den_mask << params->info->den_shift); ++ val |= (params->tbl[i].den & params->info->den_mask) ++ << 
params->info->den_shift; ++ writel(val, ddn->common.base + ddn->common.reg_ctrl); ++ ++ if (ddn->common.lock) ++ spin_unlock_irqrestore(ddn->common.lock, flags); ++ ++ return 0; ++} ++ ++const struct clk_ops ccu_ddn_ops = { ++ .disable = ccu_ddn_disable, ++ .enable = ccu_ddn_enable, ++ .is_enabled = ccu_ddn_is_enabled, ++ .recalc_rate = clk_ddn_recalc_rate, ++ .round_rate = clk_ddn_round_rate, ++ .set_rate = clk_ddn_set_rate, ++}; ++ +diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h +new file mode 100644 +index 000000000000..577f25250a11 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_ddn.h +@@ -0,0 +1,86 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. */ ++ ++#ifndef _CCU_DDN_H_ ++#define _CCU_DDN_H_ ++ ++#include ++#include ++ ++#include "ccu-spacemit-k1x.h" ++ ++struct ccu_ddn_tbl { ++ unsigned int num; ++ unsigned int den; ++}; ++ ++struct ccu_ddn_info { ++ unsigned int factor; ++ unsigned int num_mask; ++ unsigned int den_mask; ++ unsigned int num_shift; ++ unsigned int den_shift; ++}; ++ ++struct ccu_ddn_config { ++ struct ccu_ddn_info *info; ++ struct ccu_ddn_tbl *tbl; ++ u32 tbl_size; ++}; ++ ++#define PLL_DDN_TBL(_num, _den) \ ++ { \ ++ .num = (_num), \ ++ .den = (_den), \ ++ } ++ ++struct ccu_ddn { ++ u32 gate; ++ struct ccu_ddn_config ddn; ++ struct ccu_common common; ++}; ++ ++#define _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size) \ ++ { \ ++ .info = (struct ccu_ddn_info *)_info, \ ++ .tbl = (struct ccu_ddn_tbl *)_table, \ ++ .tbl_size = _size, \ ++ } ++ ++#define SPACEMIT_CCU_DDN(_struct, _name, _parent, _info, _table, \ ++ _size, _base_type, _reg_ctrl, _flags) \ ++ struct ccu_ddn _struct = { \ ++ .ddn = _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .base_type = _base_type, \ ++ .hw.init = CLK_HW_INIT(_name, \ ++ _parent, &ccu_ddn_ops, _flags), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_DDN_GATE(_struct, _name, _parent, _info, \ ++ _table, _size, _base_type, _reg_ddn, \ ++ __reg_gate, _gate_mask, _flags) \ ++ struct ccu_ddn _struct = { \ ++ .gate = _gate_mask, \ ++ .ddn = _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size), \ ++ .common = { \ ++ .reg_ctrl = _reg_ddn, \ ++ .reg_sel = __reg_gate, \ ++ .base_type = _base_type, \ ++ .hw.init = CLK_HW_INIT(_name, \ ++ _parent, &ccu_ddn_ops, _flags), \ ++ } \ ++ } ++ ++static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ ++ return container_of(common, struct ccu_ddn, common); ++} ++ ++extern const struct clk_ops ccu_ddn_ops; ++ ++#endif +diff --git a/drivers/clk/spacemit/ccu_ddr.c b/drivers/clk/spacemit/ccu_ddr.c +new file mode 100644 +index 000000000000..ffd8650a6e79 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_ddr.c +@@ -0,0 +1,272 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit clock type ddr ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "ccu_ddr.h" ++ ++#define PMU_AP_IMR (0x098) ++#define AP_DCLK_FC_DONE_INT_MSK BIT(15) ++#define DCLK_FC_DONE_INT_MSK BIT(4) ++ ++#define PMU_AP_ISR (0x0a0) ++#define AP_DCLK_FC_DONE_INT_STS BIT(15) ++#define DCLK_FC_DONE_INT_STS BIT(4) ++#define AP_FC_STS BIT(1) ++ ++#define DFC_AP (0x180) ++#define DFC_FREQ_LV 0x1 ++#define DFC_REQ BIT(0) ++ ++#define DFC_STATUS (0x188) ++#define DFC_CAUSE_SHIFT 0x7 ++#define DFC_STS BIT(0) ++ ++/* enable/disable ddr frequency change done interrupt */ ++static void ccu_ddr_enable_dfc_int(struct ccu_common *common, bool enable) ++{ ++ u32 val; ++ unsigned long flags; ++ ++ spin_lock_irqsave(common->lock, flags); ++ val = readl(common->base + PMU_AP_IMR); ++ if (enable) ++ val |= AP_DCLK_FC_DONE_INT_MSK; ++ else ++ val &= ~AP_DCLK_FC_DONE_INT_MSK; ++ ++ writel(val, common->base + PMU_AP_IMR); ++ spin_unlock_irqrestore(common->lock, flags); ++} ++ ++/* clear ddr frequency change done interrupt status*/ ++static void ccu_ddr_clear_dfc_int_status(struct ccu_common *common) ++{ ++ u32 val; ++ unsigned long flags; ++ ++ spin_lock_irqsave(common->lock, flags); ++ val = readl(common->base + PMU_AP_ISR); ++ val &= ~(AP_DCLK_FC_DONE_INT_STS | AP_FC_STS); ++ writel(val, common->base + PMU_AP_ISR); ++ spin_unlock_irqrestore(common->lock, flags); ++} ++ ++static int ccu_ddr_wait_freq_change_done(struct ccu_common *common) ++{ ++ int timeout = 100; ++ u32 val; ++ ++ while (--timeout) { ++ udelay(10); ++ val = readl(common->base + PMU_AP_ISR); ++ if (val & AP_DCLK_FC_DONE_INT_STS) ++ break; ++ } ++ if (!timeout) { ++ pr_err("%s: wait dfc done timeout!\n", __func__); ++ return -EBUSY; ++ } ++ return 0; ++} ++ ++static int ccu_ddr_freq_chg(struct ccu_common *common, ++ struct ccu_mux_config *mux, u8 level) ++{ ++ u32 reg; ++ u32 timeout; ++ unsigned long flags; ++ ++ if (level > MAX_FREQ_LV) { ++ pr_err("%s: invalid %d freq level\n", __func__, level); ++ return -EINVAL; ++ } ++ ++ /* check if dfc in progress */ ++ timeout = 1000; ++ while (--timeout) { ++ if (!(readl(common->base + DFC_STATUS) & DFC_STS)) ++ break; ++ udelay(10); ++ } ++ ++ if (!timeout) { ++ pr_err("%s: another dfc is in pregress. status:0x%x\n", ++ __func__, readl(common->base + DFC_STATUS)); ++ return -EBUSY; ++ } ++ ++ spin_lock_irqsave(common->lock, flags); ++ reg = readl(common->base + common->reg_sel); ++ reg &= ~GENMASK(mux->width + mux->shift - 1, mux->shift); ++ writel(reg | (level << mux->shift) | common->fc, ++ common->base + common->reg_sel); ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ timeout = 1000; ++ while (--timeout) { ++ udelay(10); ++ if (!(readl(common->base + DFC_STATUS) & DFC_STS)) ++ break; ++ } ++ ++ if (!timeout) { ++ pr_err("dfc error! 
status:0x%x\n", ++ readl(common->base + DFC_STATUS)); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static unsigned long ccu_ddr_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ return parent_rate; ++} ++ ++static long ccu_ddr_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *prate) ++{ ++ return rate; ++} ++ ++unsigned long ccu_ddr_calc_best_rate(struct clk_hw *hw, unsigned long rate, ++ u32 *mux_val) ++{ ++ struct ccu_ddr *ddr = hw_to_ccu_ddr(hw); ++ struct ccu_common *common = &ddr->common; ++ struct clk_hw *parent; ++ unsigned long parent_rate = 0, best_rate = 0; ++ u32 i; ++ ++ for (i = 0; i < common->num_parents; i++) { ++ parent = clk_hw_get_parent_by_index(hw, i); ++ if (!parent) ++ continue; ++ parent_rate = clk_get_rate(clk_hw_get_clk(parent, ++ common->name)); ++ if (abs(parent_rate - rate) < abs(best_rate - rate)) { ++ best_rate = parent_rate; ++ *mux_val = i; ++ } ++ } ++ return best_rate; ++} ++ ++static int ccu_ddr_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct ccu_ddr *ddr = hw_to_ccu_ddr(hw); ++ struct ccu_common *common = &ddr->common; ++ struct ccu_mux_config *mux = ddr->mux ? ddr->mux : NULL; ++ unsigned long best_rate = 0; ++ u32 cur_mux, mux_val = 0; ++ u32 reg = 0; ++ ++ if (!mux) ++ return 0; ++ ++ best_rate = ccu_ddr_calc_best_rate(hw, rate, &mux_val); ++ ++ reg = readl(common->base + common->reg_sel); ++ if (mux) { ++ cur_mux = reg >> mux->shift; ++ cur_mux &= (1 << mux->width) - 1; ++ if (cur_mux != mux_val) ++ clk_hw_set_parent(hw, clk_hw_get_parent_by_index(hw, mux_val)); ++ } ++ return 0; ++} ++ ++static u8 ccu_ddr_get_parent(struct clk_hw *hw) ++{ ++ struct ccu_ddr *ddr = hw_to_ccu_ddr(hw); ++ struct ccu_common *common = &ddr->common; ++ struct ccu_mux_config *mux = ddr->mux; ++ u32 reg; ++ u8 parent; ++ ++ if (!mux) ++ return 0; ++ ++ reg = readl(common->base + common->reg_sel); ++ ++ parent = reg >> mux->shift; ++ parent &= (1 << mux->width) - 1; ++ ++ if (mux->table) { ++ int num_parents = clk_hw_get_num_parents(&common->hw); ++ int i; ++ ++ for (i = 0; i < num_parents; i++) ++ if (mux->table[i] == parent) ++ return i; ++ } ++ return parent; ++} ++ ++static int ccu_ddr_set_parent(struct clk_hw *hw, u8 index) ++{ ++ struct ccu_ddr *ddr = hw_to_ccu_ddr(hw); ++ struct ccu_common *common = &ddr->common; ++ struct ccu_mux_config *mux = ddr->mux; ++ int ret = 0; ++ ++ if (!mux) ++ return 0; ++ ++ if (mux->table) ++ index = mux->table[index]; ++ ++ /* request change begin */ ++ ccu_ddr_enable_dfc_int(common, true); ++ ++ /* change parent*/ ++ ret = ccu_ddr_freq_chg(common, mux, index); ++ if (ret < 0) { ++ pr_err("%s: ddr_freq_chg fail. ret = %d\n", __func__, ret); ++ return ret; ++ } ++ ++ /* wait for frequency change done */ ++ ret = ccu_ddr_wait_freq_change_done(common); ++ if (ret < 0) { ++ pr_err("%s: wait_freq_change_done timeout. 
ret = %d\n", ++ __func__, ret); ++ return ret; ++ } ++ ccu_ddr_clear_dfc_int_status(common); ++ ccu_ddr_enable_dfc_int(common, false); ++ ++ return 0; ++} ++ ++static int ccu_ddr_determine_rate(struct clk_hw *hw, ++ struct clk_rate_request *req) ++{ ++ unsigned long best_rate = req->rate; ++ u32 mux_val = 0; ++ ++ best_rate = ccu_ddr_calc_best_rate(hw, req->rate, &mux_val); ++ req->rate = best_rate; ++ return 0; ++} ++ ++const struct clk_ops ccu_ddr_ops = { ++ .get_parent = ccu_ddr_get_parent, ++ .set_parent = ccu_ddr_set_parent, ++ .determine_rate = ccu_ddr_determine_rate, ++ .round_rate = ccu_ddr_round_rate, ++ .recalc_rate = ccu_ddr_recalc_rate, ++ .set_rate = ccu_ddr_set_rate, ++}; ++ +diff --git a/drivers/clk/spacemit/ccu_ddr.h b/drivers/clk/spacemit/ccu_ddr.h +new file mode 100644 +index 000000000000..960ca3456796 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_ddr.h +@@ -0,0 +1,44 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. */ ++ ++#ifndef _CCU_DDR_H_ ++#define _CCU_DDR_H_ ++ ++#include ++#include "ccu-spacemit-k1x.h" ++#include "ccu_mix.h" ++ ++struct ccu_ddr { ++ struct ccu_mux_config *mux; ++ struct ccu_common common; ++}; ++ ++#define MAX_FREQ_LV 7 ++ ++#define SPACEMIT_CCU_DDR_FC(_struct, _name, _parents, _base_type, \ ++ _reg, _fc, _shift, _width, _flags) \ ++ struct ccu_ddr _struct = { \ ++ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_sel = _reg, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_ddr_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++static inline struct ccu_ddr *hw_to_ccu_ddr(struct clk_hw *hw) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ ++ return container_of(common, struct ccu_ddr, common); ++} ++ ++extern const struct clk_ops ccu_ddr_ops; ++ ++#endif /* _CCU_DDR_H_ */ +diff --git a/drivers/clk/spacemit/ccu_dpll.c b/drivers/clk/spacemit/ccu_dpll.c +new file mode 100644 +index 000000000000..ff8b699e1ba2 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_dpll.c +@@ -0,0 +1,124 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit clock type pll ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "ccu_dpll.h" ++ ++#define DPLL_MIN_FREQ 1700000000 ++#define DPLL_MAX_FREQ 3400000000 ++ ++#define pll_readl(reg) readl(reg) ++#define pll_readl_pll_swcr1(p) pll_readl(p.base + p.reg_ctrl) ++#define pll_readl_pll_swcr2(p) pll_readl(p.base + p.reg_sel) ++ ++#define pll_writel(val, reg) writel(val, reg) ++#define pll_writel_pll_swcr1(val, p) pll_writel(val, p.base + p.reg_ctrl) ++#define pll_writel_pll_swcr2(val, p) pll_writel(val, p.base + p.reg_sel) ++ ++/* unified dpllx_swcr1 for dpll1~2 */ ++union dpllx_swcr1 { ++ struct { ++ unsigned int reg0:8; ++ unsigned int reg1:8; ++ unsigned int reg2:8; ++ unsigned int reg3:8; ++ } b; ++ unsigned int v; ++}; ++ ++/* unified dpllx_swcr2 for dpll1~2 */ ++union dpllx_swcr2 { ++ struct { ++ unsigned int reg4:8; ++ unsigned int reg5:8; ++ unsigned int reg6:8; ++ unsigned int reg7:8; ++ } b; ++ unsigned int v; ++}; ++ ++/* frequency unit Mhz, return pll vco freq */ ++static unsigned long __get_vco_freq(struct clk_hw *hw) ++{ ++ unsigned int reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, size, i; ++ struct ccu_dpll_rate_tbl *freq_pll_regs_table, *pll_reg; ++ struct ccu_dpll *p = hw_to_ccu_dpll(hw); ++ union dpllx_swcr1 swcr1; ++ union dpllx_swcr2 swcr2; ++ ++ swcr1.v = pll_readl_pll_swcr1(p->common); ++ swcr2.v = pll_readl_pll_swcr2(p->common); ++ ++ reg0 = swcr1.b.reg0; ++ reg1 = swcr1.b.reg1; ++ reg2 = swcr1.b.reg2; ++ reg3 = swcr1.b.reg3; ++ reg4 = swcr2.b.reg4; ++ reg5 = swcr2.b.reg5; ++ reg6 = swcr2.b.reg6; ++ reg7 = swcr2.b.reg7; ++ ++ freq_pll_regs_table = p->dpll.rate_tbl; ++ size = p->dpll.tbl_size; ++ ++ for (i = 0; i < size; i++) { ++ pll_reg = &freq_pll_regs_table[i]; ++ if (pll_reg->reg0 == reg0 && pll_reg->reg1 == reg1 && ++ pll_reg->reg2 == reg2 && pll_reg->reg3 == reg3 && ++ pll_reg->reg4 == reg4 && pll_reg->reg5 == reg5 && ++ pll_reg->reg6 == reg6 && pll_reg->reg7 == reg7) ++ return pll_reg->rate; ++ } ++ ++ pr_err("Unknown rate for clock %s\n", __clk_get_name(hw->clk)); ++ return 0; ++} ++ ++static unsigned long ccu_dpll_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ return __get_vco_freq(hw); ++} ++ ++static long ccu_dpll_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *prate) ++{ ++ struct ccu_dpll *p = hw_to_ccu_dpll(hw); ++ unsigned long max_rate = 0; ++ unsigned int i; ++ struct ccu_dpll_config *params = &p->dpll; ++ ++ if (rate > DPLL_MAX_FREQ || rate < DPLL_MIN_FREQ) { ++ pr_err("%lu rate out of range!\n", rate); ++ return -EINVAL; ++ } ++ ++ if (params->rate_tbl) { ++ for (i = 0; i < params->tbl_size; i++) { ++ if (params->rate_tbl[i].rate <= rate) { ++ if (max_rate < params->rate_tbl[i].rate) ++ max_rate = params->rate_tbl[i].rate; ++ } ++ } ++ } else { ++ pr_err("don't find freq table for pll\n"); ++ } ++ ++ return max_rate; ++} ++ ++const struct clk_ops ccu_dpll_ops = { ++ .recalc_rate = ccu_dpll_recalc_rate, ++ .round_rate = ccu_dpll_round_rate, ++}; ++ +diff --git a/drivers/clk/spacemit/ccu_dpll.h b/drivers/clk/spacemit/ccu_dpll.h +new file mode 100644 +index 000000000000..d5632528dc1f +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_dpll.h +@@ -0,0 +1,76 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. 
*/ ++ ++#ifndef _CCU_DPLL_H_ ++#define _CCU_DPLL_H_ ++ ++#include ++#include ++#include "ccu-spacemit-k1x.h" ++ ++struct ccu_dpll_rate_tbl { ++ unsigned long long rate; ++ u32 reg0; ++ u32 reg1; ++ u32 reg2; ++ u32 reg3; ++ u32 reg4; ++ u32 reg5; ++ u32 reg6; ++ u32 reg7; ++}; ++ ++struct ccu_dpll_config { ++ struct ccu_dpll_rate_tbl *rate_tbl; ++ u32 tbl_size; ++}; ++ ++#define DPLL_RATE(_rate, _reg0, _reg1, _reg2, _reg3, _reg4, \ ++ _reg5, _reg6, _reg7) \ ++ { \ ++ .rate = (_rate), \ ++ .reg0 = (_reg0), \ ++ .reg1 = (_reg1), \ ++ .reg2 = (_reg2), \ ++ .reg3 = (_reg3), \ ++ .reg4 = (_reg4), \ ++ .reg5 = (_reg5), \ ++ .reg6 = (_reg6), \ ++ .reg7 = (_reg7), \ ++ } ++ ++struct ccu_dpll { ++ struct ccu_dpll_config dpll; ++ struct ccu_common common; ++}; ++ ++#define _SPACEMIT_CCU_DPLL_CONFIG(_table, _size) \ ++ { \ ++ .rate_tbl = (struct ccu_dpll_rate_tbl *)_table, \ ++ .tbl_size = _size, \ ++ } ++ ++#define SPACEMIT_CCU_DPLL(_struct, _name, _table, _size, _base_type, \ ++ _reg_ctrl, _reg_sel, _is_pll, _flags) \ ++ struct ccu_dpll _struct = { \ ++ .dpll = _SPACEMIT_CCU_DPLL_CONFIG(_table, _size), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .reg_sel = _reg_sel, \ ++ .base_type = _base_type, \ ++ .is_pll = 0, \ ++ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ ++ &ccu_dpll_ops, _flags), \ ++ } \ ++ } ++ ++static inline struct ccu_dpll *hw_to_ccu_dpll(struct clk_hw *hw) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ ++ return container_of(common, struct ccu_dpll, common); ++} ++ ++extern const struct clk_ops ccu_dpll_ops; ++ ++#endif +diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c +new file mode 100644 +index 000000000000..baa341090f53 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_mix.c +@@ -0,0 +1,502 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit clock type mix(div/mux/gate/factor) ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "ccu_mix.h" ++ ++#define TIMEOUT_LIMIT (20000) ++static int twsi8_reg_val = 0x04; ++const char *tswi8_clk_name = "twsi8_clk"; ++ ++static void ccu_mix_disable(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_gate_config *gate = mix->gate; ++ unsigned long flags = 0; ++ unsigned long rate; ++ u32 tmp; ++ ++ if (!gate) ++ return; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ twsi8_reg_val &= ~gate->gate_mask; ++ twsi8_reg_val |= gate->val_disable; ++ tmp = twsi8_reg_val; ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(tmp, common->base + common->reg_sel); ++ else ++ writel(tmp, common->base + common->reg_ctrl); ++ return; ++ } ++ ++ if (common->lock) ++ spin_lock_irqsave(common->lock, flags); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ tmp = readl(common->base + common->reg_sel); ++ else ++ tmp = readl(common->base + common->reg_ctrl); ++ ++ tmp &= ~gate->gate_mask; ++ tmp |= gate->val_disable; ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(tmp, common->base + common->reg_sel); ++ else ++ writel(tmp, common->base + common->reg_ctrl); ++ ++ if (common->lock) ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ if (gate->flags & SPACEMIT_CLK_GATE_NEED_DELAY) { ++ rate = clk_hw_get_rate(&common->hw); ++ ++ if (rate == 0) ++ pr_err("clock rate of %s is 0.\n", ++ clk_hw_get_name(&common->hw)); ++ else ++ udelay(DIV_ROUND_UP(2000000, rate)); ++ } ++} ++ ++static int ccu_mix_enable(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_gate_config *gate = mix->gate; ++ unsigned long flags = 0; ++ unsigned long rate; ++ u32 tmp; ++ u32 val = 0; ++ int timeout_power = 1; ++ ++ if (!gate) ++ return 0; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ twsi8_reg_val &= ~gate->gate_mask; ++ twsi8_reg_val |= gate->val_enable; ++ tmp = twsi8_reg_val; ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(tmp, common->base + common->reg_sel); ++ else ++ writel(tmp, common->base + common->reg_ctrl); ++ return 0; ++ } ++ ++ if (common->lock) ++ spin_lock_irqsave(common->lock, flags); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ tmp = readl(common->base + common->reg_sel); ++ else ++ tmp = readl(common->base + common->reg_ctrl); ++ ++ tmp &= ~gate->gate_mask; ++ tmp |= gate->val_enable; ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(tmp, common->base + common->reg_sel); ++ else ++ writel(tmp, common->base + common->reg_ctrl); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ val = readl(common->base + common->reg_sel); ++ else ++ val = readl(common->base + common->reg_ctrl); ++ ++ if (common->lock) ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ while ((val & gate->gate_mask) != gate->val_enable && ++ (timeout_power < TIMEOUT_LIMIT)) { ++ udelay(timeout_power); ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ val = readl(common->base + common->reg_sel); ++ 
else ++ val = readl(common->base + common->reg_ctrl); ++ timeout_power *= 10; ++ } ++ ++ if (timeout_power > 1) { ++ if (val == tmp) ++ pr_err("write clk_gate %s timeout occur, read pass after %d us delay\n", ++ clk_hw_get_name(&common->hw), timeout_power); ++ else ++ pr_err("write clk_gate %s timeout after %d us!\n", ++ clk_hw_get_name(&common->hw), timeout_power); ++ } ++ ++ if (gate->flags & SPACEMIT_CLK_GATE_NEED_DELAY) { ++ rate = clk_hw_get_rate(&common->hw); ++ ++ if (rate == 0) ++ pr_err("clock rate of %s is 0.\n", ++ clk_hw_get_name(&common->hw)); ++ else ++ udelay(DIV_ROUND_UP(2000000, rate)); ++ } ++ ++ return 0; ++} ++ ++static int ccu_mix_is_enabled(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_gate_config *gate = mix->gate; ++ unsigned long flags = 0; ++ u32 tmp; ++ ++ if (!gate) ++ return 1; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) ++ return (twsi8_reg_val & gate->gate_mask) == gate->val_enable; ++ ++ if (common->lock) ++ spin_lock_irqsave(common->lock, flags); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ tmp = readl(common->base + common->reg_sel); ++ else ++ tmp = readl(common->base + common->reg_ctrl); ++ ++ if (common->lock) ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ return (tmp & gate->gate_mask) == gate->val_enable; ++} ++ ++static unsigned long ccu_mix_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_div_config *div = mix->div; ++ unsigned long val; ++ u32 reg; ++ ++ if (!div) { ++ if (mix->factor) ++ parent_rate = parent_rate * mix->factor->mul / mix->factor->div; ++ return parent_rate; ++ } ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ val = reg >> div->shift; ++ val &= (1 << div->width) - 1; ++ ++ val = divider_recalc_rate(hw, parent_rate, val, div->table, ++ div->flags, div->width); ++ ++ return val; ++} ++ ++static int ccu_mix_trigger_fc(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ unsigned long val = 0; ++ ++ int ret = 0, timeout = 50; ++ ++ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_DIV_V5 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_MUX_V6) { ++ timeout = 50; ++ val = readl(common->base + common->reg_ctrl); ++ val |= common->fc; ++ writel(val, common->base + common->reg_ctrl); ++ ++ do { ++ val = readl(common->base + common->reg_ctrl); ++ timeout--; ++ if (!(val & common->fc)) ++ break; ++ } while (timeout); ++ ++ if (timeout == 0) { ++ timeout = 5000; ++ do { ++ val = readl(common->base + common->reg_ctrl); ++ timeout--; ++ if (!(val & common->fc)) ++ break; ++ } while (timeout); ++ if (timeout != 0) ++ ret = 0; ++ else ++ ret = -1; ++ } ++ } ++ ++ return ret; ++} ++ ++static long ccu_mix_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *prate) ++{ ++ return rate; ++} ++ ++unsigned long ccu_mix_calc_best_rate(struct clk_hw *hw, ++ unsigned long rate, u32 *mux_val, ++ u32 *div_val) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_div_config *div = mix->div ? 
mix->div : NULL; ++ struct clk *clk; ++ struct clk_hw *parent; ++ unsigned long parent_rate = 0, best_rate = 0; ++ u32 i, j, div_max; ++ ++ for (i = 0; i < common->num_parents; i++) { ++ parent = clk_hw_get_parent_by_index(hw, i); ++ if (!parent) ++ continue; ++ clk = clk_hw_get_clk(parent, common->name); ++ parent_rate = clk_get_rate(clk); ++ ++ if (div) ++ div_max = 1 << div->width; ++ else ++ div_max = 1; ++ ++ for (j = 1; j <= div_max; j++) { ++ if (abs(parent_rate / j - rate) ++ < abs(best_rate - rate)) { ++ best_rate = DIV_ROUND_UP_ULL(parent_rate, j); ++ *mux_val = i; ++ *div_val = j - 1; ++ } ++ } ++ } ++ ++ return best_rate; ++} ++ ++static int ccu_mix_determine_rate(struct clk_hw *hw, ++ struct clk_rate_request *req) ++{ ++ return 0; ++} ++ ++static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_div_config *div = mix->div ? mix->div : NULL; ++ struct ccu_mux_config *mux = mix->mux ? mix->mux : NULL; ++ struct clk_hw *parent; ++ unsigned long best_rate = 0; ++ unsigned long flags; ++ u32 cur_mux, cur_div, mux_val = 0, div_val = 0; ++ u32 reg = 0; ++ int ret = 0; ++ ++ if (!div && !mux) ++ return 0; ++ ++ best_rate = ccu_mix_calc_best_rate(hw, rate, &mux_val, &div_val); ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ if (mux) { ++ cur_mux = twsi8_reg_val >> mux->shift; ++ cur_mux &= (1 << mux->width) - 1; ++ parent = clk_hw_get_parent_by_index(hw, mux_val); ++ if (cur_mux != mux_val) ++ clk_hw_set_parent(hw, parent); ++ } ++ return 0; ++ } ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ if (mux) { ++ cur_mux = reg >> mux->shift; ++ cur_mux &= (1 << mux->width) - 1; ++ parent = clk_hw_get_parent_by_index(hw, mux_val); ++ if (cur_mux != mux_val) ++ clk_hw_set_parent(hw, parent); ++ } ++ ++ if (div) { ++ cur_div = reg >> div->shift; ++ cur_div &= (1 << div->width) - 1; ++ if (cur_div == div_val) ++ return 0; ++ } else { ++ return 0; ++ } ++ ++ spin_lock_irqsave(common->lock, flags); ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ reg &= ~GENMASK(div->width + div->shift - 1, div->shift); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(reg | (div_val << div->shift), ++ common->base + common->reg_sel); ++ else ++ writel(reg | (div_val << div->shift), ++ common->base + common->reg_ctrl); ++ ++ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_DIV_V5) { ++ ret = ccu_mix_trigger_fc(hw); ++ } ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ if (ret) ++ pr_err("%s of %s timeout\n", __func__, ++ clk_hw_get_name(&common->hw)); ++ ++ return ret; ++} ++ ++static u8 ccu_mix_get_parent(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_mux_config *mux = mix->mux; ++ u32 reg; ++ u8 parent; ++ ++ if (!mux) ++ return 0; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ parent = twsi8_reg_val >> mux->shift; ++ parent &= (1 << mux->width) - 1; ++ return parent; ++ } ++ ++ if 
(common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ parent = reg >> mux->shift; ++ parent &= (1 << mux->width) - 1; ++ ++ if (mux->table) { ++ int num_parents = clk_hw_get_num_parents(&common->hw); ++ int i; ++ ++ for (i = 0; i < num_parents; i++) ++ if (mux->table[i] == parent) ++ return i; ++ } ++ return parent; ++} ++ ++static int ccu_mix_set_parent(struct clk_hw *hw, u8 index) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_mux_config *mux = mix->mux; ++ unsigned long flags; ++ u32 reg = 0; ++ int ret = 0; ++ ++ if (!mux) ++ return 0; ++ ++ if (mux->table) ++ index = mux->table[index]; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ twsi8_reg_val &= ~GENMASK(mux->width ++ + mux->shift - 1, mux->shift); ++ twsi8_reg_val |= (index << mux->shift); ++ reg = twsi8_reg_val; ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(reg, common->base + common->reg_sel); ++ else ++ writel(reg, common->base + common->reg_ctrl); ++ return 0; ++ } ++ ++ spin_lock_irqsave(common->lock, flags); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ reg &= ~GENMASK(mux->width + mux->shift - 1, mux->shift); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(reg | (index << mux->shift), ++ common->base + common->reg_sel); ++ else ++ writel(reg | (index << mux->shift), ++ common->base + common->reg_ctrl); ++ ++ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_MUX_V6) { ++ ret = ccu_mix_trigger_fc(hw); ++ } ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ if (ret) ++ pr_err("%s of %s timeout\n", __func__, ++ clk_hw_get_name(&common->hw)); ++ ++ return 0; ++} ++ ++const struct clk_ops ccu_mix_ops = { ++ .disable = ccu_mix_disable, ++ .enable = ccu_mix_enable, ++ .is_enabled = ccu_mix_is_enabled, ++ .get_parent = ccu_mix_get_parent, ++ .set_parent = ccu_mix_set_parent, ++ .determine_rate = ccu_mix_determine_rate, ++ .round_rate = ccu_mix_round_rate, ++ .recalc_rate = ccu_mix_recalc_rate, ++ .set_rate = ccu_mix_set_rate, ++}; ++ +diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h +new file mode 100644 +index 000000000000..4b7d67cb0225 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_mix.h +@@ -0,0 +1,380 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. 
*/ ++ ++#ifndef _CCU_MIX_H_ ++#define _CCU_MIX_H_ ++ ++#include ++#include "ccu-spacemit-k1x.h" ++ ++#define SPACEMIT_CLK_GATE_NEED_DELAY BIT(0) ++ ++struct ccu_gate_config { ++ u32 gate_mask; ++ u32 val_enable; ++ u32 val_disable; ++ u32 flags; ++}; ++ ++struct ccu_factor_config { ++ u32 div; ++ u32 mul; ++}; ++ ++struct ccu_mux_config { ++ u8 shift; ++ u8 width; ++ const u8 *table; ++ u32 flags; ++}; ++ ++struct ccu_div_config { ++ u8 shift; ++ u8 width; ++ u32 max; ++ u32 offset; ++ u32 flags; ++ struct clk_div_table *table; ++}; ++ ++struct ccu_mix { ++ struct ccu_gate_config *gate; ++ struct ccu_factor_config *factor; ++ struct ccu_div_config *div; ++ struct ccu_mux_config *mux; ++ struct ccu_common common; ++}; ++ ++#define CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, _flags) \ ++ (&(struct ccu_gate_config) { \ ++ .gate_mask = _gate_mask, \ ++ .val_enable = _val_enable, \ ++ .val_disable = _val_disable, \ ++ .flags = _flags, \ ++ }) ++ ++#define CCU_FACTOR_INIT(_div, _mul) \ ++ (&(struct ccu_factor_config) { \ ++ .div = _div, \ ++ .mul = _mul, \ ++ }) ++ ++#define CCU_MUX_INIT(_shift, _width, _table, _flags) \ ++ (&(struct ccu_mux_config) { \ ++ .shift = _shift, \ ++ .width = _width, \ ++ .table = _table, \ ++ .flags = _flags, \ ++ }) ++ ++#define CCU_DIV_INIT(_shift, _width, _table, _flags) \ ++ (&(struct ccu_div_config) { \ ++ .shift = _shift, \ ++ .width = _width, \ ++ .flags = _flags, \ ++ .table = _table, \ ++ }) ++ ++#define SPACEMIT_CCU_GATE(_struct, _name, _parent, _base_type, _reg, \ ++ _gate_mask, _val_enable, _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, _flags), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_GATE_NO_PARENT(_struct, _name, _parent, \ ++ _base_type, _reg, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 0, \ ++ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ ++ &ccu_mix_ops, _flags), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_FACTOR(_struct, _name, _parent, _div, _mul) \ ++ struct ccu_mix _struct = { \ ++ .factor = CCU_FACTOR_INIT(_div, _mul), \ ++ .common = { \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, \ ++ _parent, &ccu_mix_ops, 0), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_MUX(_struct, _name, _parents, _base_type, \ ++ _reg, _shift, _width, _flags) \ ++ struct ccu_mix _struct = { \ ++ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_DIV(_struct, _name, _parent, _base_type, \ ++ _reg, _shift, _width, _flags) \ ++ struct ccu_mix _struct = { \ ++ .div = CCU_DIV_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define 
SPACEMIT_CCU_GATE_FACTOR(_struct, _name, _parent, _base_type, \ ++ _reg, _gate_mask, _val_enable, _val_disable, \ ++ _div, _mul, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .factor = CCU_FACTOR_INIT(_div, _mul), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, _flags), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_MUX_GATE(_struct, _name, _parents, _base_type, \ ++ _reg, _shift, _width, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_DIV_GATE(_struct, _name, _parent, _base_type, \ ++ _reg, _shift, _width, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_DIV_MUX_GATE(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _muxshift, \ ++ _muxwidth, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV2_FC_MUX_GATE(_struct, _name, _parents, \ ++ _base_type, _reg_ctrl, _reg_sel, _mshift, \ ++ _mwidth, _fc, _muxshift, _muxwidth, _gate_mask, \ ++ _val_enable, _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_2REG_FC_V4, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .reg_sel = _reg_sel, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_FC_MUX_GATE(_struct, _name, _parents, \ ++ _base_type, _reg_ctrl, _mshift, _mwidth, _fc, \ ++ _muxshift, _muxwidth, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ 
.common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_MFC_MUX_GATE(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _fc, _muxshift, \ ++ _muxwidth, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, \ ++ _val_enable, _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_MUX_V6, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_FC_WITH_GATE(_struct, _name, _parent, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _fc, _gate_mask, \ ++ _val_enable, _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, \ ++ _parent, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_MUX(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _muxshift, _muxwidth, _flags) \ ++ struct ccu_mix _struct = { \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_FC_MUX(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _fc, _muxshift, \ ++ _muxwidth, _flags) \ ++ struct ccu_mix _struct = { \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_MUX_FC(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _fc, _muxshift, _muxwidth, _flags) \ ++ struct ccu_mix _struct = { \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++static inline struct ccu_mix 
*hw_to_ccu_mix(struct clk_hw *hw) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ ++ return container_of(common, struct ccu_mix, common); ++} ++ ++extern const struct clk_ops ccu_mix_ops; ++ ++#endif /* _CCU_DIV_H_ */ +diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c +new file mode 100644 +index 000000000000..9bc4d1de8b33 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_pll.c +@@ -0,0 +1,286 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit clock type pll ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "ccu_pll.h" ++ ++#define PLL_MIN_FREQ 600000000 ++#define PLL_MAX_FREQ 3400000000 ++#define PLL_DELAYTIME 590 ++ ++#define pll_readl(reg) readl(reg) ++#define pll_readl_pll_swcr1(p) pll_readl(p.base + p.reg_ctrl) ++#define pll_readl_pll_swcr2(p) pll_readl(p.base + p.reg_sel) ++#define pll_readl_pll_swcr3(p) pll_readl(p.base + p.reg_xtc) ++ ++#define pll_writel(val, reg) writel(val, reg) ++#define pll_writel_pll_swcr1(val, p) pll_writel(val, p.base + p.reg_ctrl) ++#define pll_writel_pll_swcr2(val, p) pll_writel(val, p.base + p.reg_sel) ++#define pll_writel_pll_swcr3(val, p) pll_writel(val, p.base + p.reg_xtc) ++ ++/* unified pllx_swcr1 for pll1~3 */ ++union pllx_swcr1 { ++ struct { ++ unsigned int reg5:8; ++ unsigned int reg6:8; ++ unsigned int reg7:8; ++ unsigned int reg8:8; ++ } b; ++ unsigned int v; ++}; ++ ++/* unified pllx_swcr2 for pll1~3 */ ++union pllx_swcr2 { ++ struct { ++ unsigned int div1_en:1; ++ unsigned int div2_en:1; ++ unsigned int div3_en:1; ++ unsigned int div4_en:1; ++ unsigned int div5_en:1; ++ unsigned int div6_en:1; ++ unsigned int div7_en:1; ++ unsigned int div8_en:1; ++ unsigned int reserved1:4; ++ unsigned int atest_en:1; ++ unsigned int cktest_en:1; ++ unsigned int dtest_en:1; ++ unsigned int rdo:2; ++ unsigned int mon_cfg:4; ++ unsigned int reserved2:11; ++ } b; ++ unsigned int v; ++}; ++ ++union pllx_swcr3 { ++ struct { ++ unsigned int div_frc:24; ++ unsigned int div_int:7; ++ unsigned int pll_en:1; ++ } b; ++ ++ unsigned int v; ++}; ++ ++static int ccu_pll_is_enabled(struct clk_hw *hw) ++{ ++ struct ccu_pll *p = hw_to_ccu_pll(hw); ++ union pllx_swcr3 swcr3; ++ unsigned int enabled; ++ ++ swcr3.v = pll_readl_pll_swcr3(p->common); ++ enabled = swcr3.b.pll_en; ++ ++ return enabled; ++} ++ ++static unsigned long __get_vco_freq(struct clk_hw *hw) ++{ ++ unsigned int reg5, reg6, reg7, reg8, size, i; ++ unsigned int div_int, div_frc; ++ struct ccu_pll_rate_tbl *freq_pll_regs_table, *pll_regs; ++ struct ccu_pll *p = hw_to_ccu_pll(hw); ++ union pllx_swcr1 swcr1; ++ union pllx_swcr3 swcr3; ++ ++ swcr1.v = pll_readl_pll_swcr1(p->common); ++ swcr3.v = pll_readl_pll_swcr3(p->common); ++ ++ reg5 = swcr1.b.reg5; ++ reg6 = swcr1.b.reg6; ++ reg7 = swcr1.b.reg7; ++ reg8 = swcr1.b.reg8; ++ ++ div_int = swcr3.b.div_int; ++ div_frc = swcr3.b.div_frc; ++ ++ freq_pll_regs_table = p->pll.rate_tbl; ++ size = p->pll.tbl_size; ++ ++ for (i = 0; i < size; i++) { ++ pll_regs = &freq_pll_regs_table[i]; ++ if (pll_regs->reg5 == reg5 && pll_regs->reg6 == reg6 && ++ pll_regs->reg7 == reg7 && pll_regs->reg8 == reg8 && ++ pll_regs->div_int == div_int && ++ pll_regs->div_frac == div_frc) ++ return pll_regs->rate; ++ } ++ ++ pr_err("Unknown rate for clock %s\n", __clk_get_name(hw->clk)); ++ ++ return 0; ++} ++ ++static int ccu_pll_enable(struct clk_hw *hw) ++{ ++ unsigned int delaytime = PLL_DELAYTIME; ++ unsigned long flags; ++ struct ccu_pll *p = 
hw_to_ccu_pll(hw); ++ union pllx_swcr3 swcr3; ++ ++ if (ccu_pll_is_enabled(hw)) ++ return 0; ++ ++ spin_lock_irqsave(p->common.lock, flags); ++ swcr3.v = pll_readl_pll_swcr3(p->common); ++ swcr3.b.pll_en = 1; ++ pll_writel_pll_swcr3(swcr3.v, p->common); ++ spin_unlock_irqrestore(p->common.lock, flags); ++ ++ /* check lock status */ ++ udelay(50); ++ ++ while ((!(readl(p->pll.lock_base + p->pll.reg_lock) ++ & p->pll.lock_enable_bit)) && delaytime) { ++ udelay(5); ++ delaytime--; ++ } ++ ++ if (unlikely(!delaytime)) { ++ pr_err("%s enabling didn't get stable within 3000us!!!\n", ++ __clk_get_name(hw->clk)); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static void ccu_pll_disable(struct clk_hw *hw) ++{ ++ unsigned long flags; ++ struct ccu_pll *p = hw_to_ccu_pll(hw); ++ union pllx_swcr3 swcr3; ++ ++ spin_lock_irqsave(p->common.lock, flags); ++ swcr3.v = pll_readl_pll_swcr3(p->common); ++ swcr3.b.pll_en = 0; ++ pll_writel_pll_swcr3(swcr3.v, p->common); ++ spin_unlock_irqrestore(p->common.lock, flags); ++} ++ ++/* ++ * pll rate change requires sequence: ++ * clock off -> change rate setting -> clock on ++ * This function doesn't really change rate, but cache the config ++ */ ++static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ unsigned int i, reg5 = 0, reg6 = 0, reg7 = 0, reg8 = 0; ++ unsigned int div_int, div_frc; ++ unsigned long flags; ++ unsigned long new_rate = rate, old_rate; ++ struct ccu_pll *p = hw_to_ccu_pll(hw); ++ struct ccu_pll_config *params = &p->pll; ++ union pllx_swcr1 swcr1; ++ union pllx_swcr3 swcr3; ++ bool found = false; ++ bool pll_enabled = false; ++ ++ if (ccu_pll_is_enabled(hw)) { ++ pll_enabled = true; ++ ccu_pll_disable(hw); ++ } ++ ++ old_rate = __get_vco_freq(hw); ++ ++ /* setp 1: calculate fbd frcd kvco and band */ ++ if (params->rate_tbl) { ++ for (i = 0; i < params->tbl_size; i++) { ++ if (rate == params->rate_tbl[i].rate) { ++ found = true; ++ ++ reg5 = params->rate_tbl[i].reg5; ++ reg6 = params->rate_tbl[i].reg6; ++ reg7 = params->rate_tbl[i].reg7; ++ reg8 = params->rate_tbl[i].reg8; ++ div_int = params->rate_tbl[i].div_int; ++ div_frc = params->rate_tbl[i].div_frac; ++ break; ++ } ++ } ++ ++ WARN_ON_ONCE(!found); ++ } else { ++ pr_err("don't find freq table for pll\n"); ++ if (pll_enabled) ++ ccu_pll_enable(hw); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(p->common.lock, flags); ++ ++ /* setp 2: set pll kvco/band and fbd/frcd setting */ ++ swcr1.v = pll_readl_pll_swcr1(p->common); ++ swcr1.b.reg5 = reg5; ++ swcr1.b.reg6 = reg6; ++ swcr1.b.reg7 = reg7; ++ swcr1.b.reg8 = reg8; ++ pll_writel_pll_swcr1(swcr1.v, p->common); ++ ++ swcr3.v = pll_readl_pll_swcr3(p->common); ++ swcr3.b.div_int = div_int; ++ swcr3.b.div_frc = div_frc; ++ pll_writel_pll_swcr3(swcr3.v, p->common); ++ ++ spin_unlock_irqrestore(p->common.lock, flags); ++ ++ if (pll_enabled) ++ ccu_pll_enable(hw); ++ ++ pr_debug("%s %s rate %lu->%lu!\n", __func__, ++ __clk_get_name(hw->clk), old_rate, new_rate); ++ return 0; ++} ++ ++static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ return __get_vco_freq(hw); ++} ++ ++static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *prate) ++{ ++ struct ccu_pll *p = hw_to_ccu_pll(hw); ++ unsigned long max_rate = 0; ++ unsigned int i; ++ struct ccu_pll_config *params = &p->pll; ++ ++ if (rate > PLL_MAX_FREQ || rate < PLL_MIN_FREQ) { ++ pr_err("%lu rate out of range!\n", rate); ++ return -EINVAL; ++ } ++ ++ if (params->rate_tbl) { 
++ for (i = 0; i < params->tbl_size; i++) { ++ if (params->rate_tbl[i].rate <= rate) { ++ if (max_rate < params->rate_tbl[i].rate) ++ max_rate = params->rate_tbl[i].rate; ++ } ++ } ++ } else { ++ pr_err("don't find freq table for pll\n"); ++ } ++ ++ return max_rate; ++} ++ ++const struct clk_ops ccu_pll_ops = { ++ .enable = ccu_pll_enable, ++ .disable = ccu_pll_disable, ++ .set_rate = ccu_pll_set_rate, ++ .recalc_rate = ccu_pll_recalc_rate, ++ .round_rate = ccu_pll_round_rate, ++ .is_enabled = ccu_pll_is_enabled, ++}; ++ +diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h +new file mode 100644 +index 000000000000..0f6f2ed397da +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_pll.h +@@ -0,0 +1,79 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. */ ++ ++#ifndef _CCU_PLL_H_ ++#define _CCU_PLL_H_ ++ ++#include ++#include ++#include "ccu-spacemit-k1x.h" ++ ++struct ccu_pll_rate_tbl { ++ unsigned long long rate; ++ u32 reg5; ++ u32 reg6; ++ u32 reg7; ++ u32 reg8; ++ unsigned int div_int; ++ unsigned int div_frac; ++}; ++ ++struct ccu_pll_config { ++ struct ccu_pll_rate_tbl *rate_tbl; ++ u32 tbl_size; ++ void __iomem *lock_base; ++ u32 reg_lock; ++ u32 lock_enable_bit; ++}; ++ ++#define PLL_RATE(_rate, _reg5, _reg6, _reg7, _reg8, _div_int, _div_frac) \ ++ { \ ++ .rate = (_rate), \ ++ .reg5 = (_reg5), \ ++ .reg6 = (_reg6), \ ++ .reg7 = (_reg7), \ ++ .reg8 = (_reg8), \ ++ .div_int = (_div_int), \ ++ .div_frac = (_div_frac), \ ++ } ++ ++struct ccu_pll { ++ struct ccu_pll_config pll; ++ struct ccu_common common; ++}; ++ ++#define _SPACEMIT_CCU_PLL_CONFIG(_table, _size, _reg_lock, _lock_enable_bit) \ ++ { \ ++ .rate_tbl = (struct ccu_pll_rate_tbl *)_table, \ ++ .tbl_size = _size, \ ++ .reg_lock = _reg_lock, \ ++ .lock_enable_bit = _lock_enable_bit, \ ++ } ++ ++#define SPACEMIT_CCU_PLL(_struct, _name, _table, _size, _base_type, \ ++ _reg_ctrl, _reg_sel, _reg_xtc, _reg_lock, \ ++ _lock_enable_bit, _is_pll, _flags) \ ++ struct ccu_pll _struct = { \ ++ .pll = _SPACEMIT_CCU_PLL_CONFIG(_table, _size, \ ++ _reg_lock, _lock_enable_bit), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .reg_sel = _reg_sel, \ ++ .reg_xtc = _reg_xtc, \ ++ .base_type = _base_type, \ ++ .is_pll = _is_pll, \ ++ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ ++ &ccu_pll_ops, _flags), \ ++ } \ ++ } ++ ++static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ ++ return container_of(common, struct ccu_pll, common); ++} ++ ++extern const struct clk_ops ccu_pll_ops; ++ ++#endif +diff --git a/drivers/clk/xuantie/Kconfig b/drivers/clk/xuantie/Kconfig +new file mode 100644 +index 000000000000..9a2ee8c01bf3 +--- /dev/null ++++ b/drivers/clk/xuantie/Kconfig +@@ -0,0 +1,12 @@ ++# SPDX-License-Identifier: GPL-2.0 ++ ++config XUANTIE_CLK ++ bool ++ def_bool ARCH_XUANTIE ++ ++config CLK_TH1520_FM ++ bool "XuanTie Th1520 Fullmask Clock Driver" ++ depends on ARCH_XUANTIE ++ default n ++ help ++ Build the driver for th1520 fullmask Clock Driver +diff --git a/drivers/clk/xuantie/Makefile b/drivers/clk/xuantie/Makefile +new file mode 100644 +index 000000000000..58e0ab431ae5 +--- /dev/null ++++ b/drivers/clk/xuantie/Makefile +@@ -0,0 +1,7 @@ ++# SPDX-License-Identifier: GPL-2.0 ++ ++obj-$(CONFIG_XUANTIE_CLK) += \ ++ clk.o ++ ++obj-$(CONFIG_CLK_TH1520_FM) += clk-th1520-fm.o ++obj-$(CONFIG_CLK_TH1520_FM) += gate/ +diff --git a/drivers/clk/xuantie/clk-th1520-fm.c b/drivers/clk/xuantie/clk-th1520-fm.c +new file mode 100644 
+index 000000000000..33b5aa6127fa +--- /dev/null ++++ b/drivers/clk/xuantie/clk-th1520-fm.c +@@ -0,0 +1,646 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2021 Alibaba Group Holding Limited. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk.h" ++ ++static struct clk *clks[CLK_END]; ++static struct clk_onecell_data clk_data; ++ ++/* Th1520 Fullmask */ ++static u32 share_cnt_x2h_cpusys_clk_en; ++static u32 share_cnt_dmac_cpusys_clk_en; ++static u32 share_cnt_timer0_clk_en; ++static u32 share_cnt_timer1_clk_en; ++static u32 share_cnt_axi4_cpusys2_clk_en; ++static u32 share_cnt_bmu_c910_clk_en; ++static u32 share_cnt_aon2cpu_a2x_clk_en; ++static u32 share_cnt_chip_dbg_clk_en; ++static u32 share_cnt_x2x_cpusys_clk_en; ++static u32 share_cnt_cfg2tee_x2h_clk_en; ++static u32 share_cnt_cpu2aon_x2h_clk_en; ++static u32 share_cnt_cpu2vp_x2p_clk_en; ++static u32 share_cnt_npu_core_clk_en; ++static u32 share_cnt_cpu2peri_x2h_clk_en; ++static u32 share_cnt_cpu2vi_x2h_clk_en; ++static u32 share_cnt_vpsys_axi_aclk_en; ++static u32 share_cnt_gmac1_clk_en; ++static u32 share_cnt_gmac0_clk_en; ++static u32 share_cnt_perisys_apb3_hclk_en; ++static u32 share_cnt_qspi0_clk_en; ++static u32 share_cnt_gmac_axi_clk_en; ++static u32 share_cnt_gpio0_clk_en; ++static u32 share_cnt_gpio1_clk_en; ++static u32 share_cnt_pwm_clk_en; ++static u32 share_cnt_spi_clk_en; ++static u32 share_cnt_uart0_clk_en; ++static u32 share_cnt_uart2_clk_en; ++static u32 share_cnt_i2c2_clk_en; ++static u32 share_cnt_i2c3_clk_en; ++static u32 share_cnt_peri_i2s_clk_en; ++static u32 share_cnt_qspi1_clk_en; ++static u32 share_cnt_uart1_clk_en; ++static u32 share_cnt_uart3_clk_en; ++static u32 share_cnt_uart4_clk_en; ++static u32 share_cnt_uart5_clk_en; ++static u32 share_cnt_i2c0_clk_en; ++static u32 share_cnt_i2c1_clk_en; ++static u32 share_cnt_i2c4_clk_en; ++static u32 share_cnt_i2c5_clk_en; ++static u32 share_cnt_gpio2_clk_en; ++static u32 share_cnt_gpio3_clk_en; ++static u32 share_cnt_vosys_axi_aclk_en; ++ ++/* Th1520 Fullmask PLL Bypass */ ++static const char * const cpu_pll0_bypass_sels[] = {"cpu_pll0_foutpostdiv", "osc_24m", }; ++static const char * const cpu_pll1_bypass_sels[] = {"cpu_pll1_foutpostdiv", "osc_24m", }; ++static const char * const gmac_pll_bypass_sels[] = {"gmac_pll_foutpostdiv", "osc_24m", }; ++static const char * const video_pll_bypass_sels[] = {"video_pll_foutpostdiv", "osc_24m", }; ++static const char * const tee_pll_bypass_sels[] = {"tee_pll_foutpostdiv", "osc_24m"}; ++static const char * const dpu0_pll_bypass_sels[] = {"dpu0_pll_foutpostdiv", "osc_24m"}; ++static const char * const dpu1_pll_bypass_sels[] = {"dpu1_pll_foutpostdiv", "osc_24m"}; ++ ++/* th1520 fullmask mux */ ++static const char * const ahb2_cpusys_hclk_sels[] = {"ahb2_cpusys_hclk_out_div", "osc_24m"}; ++static const char * const c910_cclk_i0_sels[] = {"cpu_pll0_foutpostdiv", "osc_24m"}; ++static const char * const c910_cclk_sels[] = {"c910_cclk_i0", "cpu_pll1_foutpostdiv"}; ++static const char * const cfg_axi_aclk_sels[] = {"cfg_axi_aclk_out_div", "osc_24m"}; +static const char * const teesys_hclk_sels[] = {"teesys_i1_hclk", "teesys_i0_hclk"}; +static const char * const perisys_ahb_hclk_sels[] = {"perisys_ahb_hclk_out_div", "osc_24m"}; +static const char * const clk_out_1_sels[] = {"osc_24m", "clk_out_1_out_div"}; @@ -27850,11 +50506,35 @@ index 000000000000..8bf7a18776f8 + + return clk; +} +diff --git 
a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c +index da3071b387eb..16cf855f55c5 100644 +--- a/drivers/clocksource/timer-riscv.c ++++ b/drivers/clocksource/timer-riscv.c +@@ -24,7 +24,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +@@ -212,6 +212,10 @@ TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt); + #ifdef CONFIG_ACPI + static int __init riscv_timer_acpi_init(struct acpi_table_header *table) + { ++ struct acpi_table_rhct *rhct = (struct acpi_table_rhct *)table; ++ ++ riscv_timer_cannot_wake_cpu = rhct->flags & ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU; ++ + return riscv_timer_init_common(); + } + diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig -index 6432eb93a80f..79434f69518c 100644 +index fd709abd3d0e..d1173f9c318f 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig -@@ -365,5 +365,14 @@ config QORIQ_CPUFREQ +@@ -356,5 +356,43 @@ config QORIQ_CPUFREQ This adds the CPUFreq driver support for Freescale QorIQ SoCs which are capable of changing the CPU's frequency dynamically. @@ -27868,22 +50548,95 @@ index 6432eb93a80f..79434f69518c 100644 + which are capable of changing the CPU's frequency dynamically. + endif ++ ++config ACPI_CPPC_CPUFREQ ++ tristate "CPUFreq driver based on the ACPI CPPC spec" ++ depends on ACPI_PROCESSOR ++ depends on ARM || ARM64 || RISCV ++ select ACPI_CPPC_LIB ++ help ++ This adds a CPUFreq driver which uses CPPC methods ++ as described in the ACPIv5.1 spec. CPPC stands for ++ Collaborative Processor Performance Controls. It ++ is based on an abstract continuous scale of CPU ++ performance values which allows the remote power ++ processor to flexibly optimize for power and ++ performance. CPPC relies on power management firmware ++ support for its operation. ++ ++ If in doubt, say N. ++ ++config ACPI_CPPC_CPUFREQ_FIE ++ bool "Frequency Invariance support for CPPC cpufreq driver" ++ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY ++ depends on ARM || ARM64 || RISCV ++ default y ++ help ++ This extends frequency invariance support in the CPPC cpufreq driver, ++ by using CPPC delivered and reference performance counters. ++ ++ If in doubt, say N. ++ endmenu +diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm +index 6265c91fbf6b..8045a6d928a4 100644 +--- a/drivers/cpufreq/Kconfig.arm ++++ b/drivers/cpufreq/Kconfig.arm +@@ -3,22 +3,6 @@ + # ARM CPU Frequency scaling drivers + # + +-config ACPI_CPPC_CPUFREQ +- tristate "CPUFreq driver based on the ACPI CPPC spec" +- depends on ACPI_PROCESSOR +- select ACPI_CPPC_LIB +- help +- This adds a CPUFreq driver which uses CPPC methods +- as described in the ACPIv5.1 spec. CPPC stands for +- Collaborative Processor Performance Controls. It +- is based on an abstract continuous scale of CPU +- performance values which allows the remote power +- processor to flexibly optimize for power and +- performance. CPPC relies on power management firmware +- support for its operation. +- +- If in doubt, say N. +- + config CPPC_CPUFREQ_SYSFS_INTERFACE + bool "Enable CPPC CPUFreq sysfs tuning interfaces" + depends on ACPI_CPPC_CPUFREQ && ARM64 +@@ -30,16 +14,6 @@ config CPPC_CPUFREQ_SYSFS_INTERFACE + + If unsure, say N. 
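The ACPI_CPPC_CPUFREQ_FIE entry added above derives frequency invariance from the CPPC "delivered" and "reference" performance counters. As a rough, simplified illustration only (this is not code from the patch, the function and parameter names are made up, and the real driver additionally folds in the CPPC reference/highest performance levels), the scale factor boils down to a counter-delta ratio:

#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/sched.h>	/* SCHED_CAPACITY_SCALE / SCHED_CAPACITY_SHIFT */

/*
 * Conceptual sketch: compute a capacity scale factor from two snapshots of
 * the CPPC delivered and reference performance counters.
 */
static unsigned long example_cppc_freq_scale(u64 delivered_prev, u64 delivered_now,
					     u64 reference_prev, u64 reference_now)
{
	u64 delivered = delivered_now - delivered_prev;
	u64 reference = reference_now - reference_prev;

	if (!reference)
		return SCHED_CAPACITY_SCALE;	/* no progress measured yet */

	/* scale = delivered / reference, expressed in SCHED_CAPACITY_SCALE units */
	return min_t(u64, div64_u64(delivered << SCHED_CAPACITY_SHIFT, reference),
		     SCHED_CAPACITY_SCALE);
}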
+ +-config ACPI_CPPC_CPUFREQ_FIE +- bool "Frequency Invariance support for CPPC cpufreq driver" +- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY +- default y +- help +- This extends frequency invariance support in the CPPC cpufreq driver, +- by using CPPC delivered and reference performance counters. +- +- If in doubt, say N. +- + config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM + tristate "Allwinner nvmem based SUN50I CPUFreq driver" + depends on ARCH_SUNXI diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile -index 0950869f1809..df2bb1149b4a 100644 +index 46c3aa314f97..63f81fbda8ba 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile -@@ -111,3 +111,4 @@ obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o +@@ -110,3 +110,4 @@ obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o + obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o - obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o - obj-$(CONFIG_SW64_CPUFREQ_DEBUGFS) += sw64_cpufreq_debugfs.o + obj-$(CONFIG_SW64_CPUFREQ) += sunway-cpufreq.o +obj-$(CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ) += th1520-cpufreq.o diff --git a/drivers/cpufreq/th1520-cpufreq.c b/drivers/cpufreq/th1520-cpufreq.c new file mode 100644 -index 000000000000..6e4186808c96 +index 000000000000..ef157fd3cdf5 --- /dev/null +++ b/drivers/cpufreq/th1520-cpufreq.c -@@ -0,0 +1,584 @@ +@@ -0,0 +1,588 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2021 Alibaba Group Holding Limited. @@ -27969,19 +50722,23 @@ index 000000000000..6e4186808c96 + +static int _th1520_switch_pllid(int pllid, int target_freq) +{ ++ int ret; ++ + pr_debug("[%s] switch to pll[%d], freq[%u]\n", __func__, pllid, target_freq); + if (pllid == TH1520_CPU_PLL_IDX(1)) { + clk_prepare_enable(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); + clk_set_rate(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk, target_freq * 1000); -+ clk_set_parent(clks[TH1520_C910_CCLK].clk, clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); ++ ret = clk_set_parent(clks[TH1520_C910_CCLK].clk, clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); + udelay(1); -+ clk_disable_unprepare(clks[TH1520_CPU_PLL0_FOUTPOSTDIV].clk); ++ if (ret) ++ clk_disable_unprepare(clks[TH1520_CPU_PLL0_FOUTPOSTDIV].clk); + } else { + clk_prepare_enable(clks[TH1520_CPU_PLL0_FOUTPOSTDIV].clk); + clk_set_rate(clks[TH1520_CPU_PLL0_FOUTPOSTDIV].clk, target_freq * 1000); -+ clk_set_parent(clks[TH1520_C910_CCLK].clk, clks[TH1520_C910_CCLK_I0].clk); ++ ret = clk_set_parent(clks[TH1520_C910_CCLK].clk, clks[TH1520_C910_CCLK_I0].clk); + udelay(1); -+ clk_disable_unprepare(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); ++ if (ret) ++ clk_disable_unprepare(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); + } + + return 0; @@ -28468,6 +51225,133 @@ index 000000000000..6e4186808c96 +MODULE_AUTHOR("fugang.duan "); +MODULE_DESCRIPTION("XuanTie TH1520 cpufreq driver"); +MODULE_LICENSE("GPL"); +diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c +index 71d433bb0ce6..50d128a4b343 100644 +--- a/drivers/cpuidle/cpuidle-riscv-sbi.c ++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c +@@ -74,26 +74,6 @@ static inline bool sbi_is_domain_state_available(void) + return data->available; + } + +-static int sbi_suspend_finisher(unsigned long suspend_type, +- unsigned long resume_addr, +- unsigned long opaque) +-{ +- struct sbiret ret; +- +- ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, +- suspend_type, resume_addr, opaque, 0, 0, 0); +- +- return (ret.error) ? 
sbi_err_map_linux_errno(ret.error) : 0; +-} +- +-static int sbi_suspend(u32 state) +-{ +- if (state & SBI_HSM_SUSP_NON_RET_BIT) +- return cpu_suspend(state, sbi_suspend_finisher); +- else +- return sbi_suspend_finisher(state, 0, 0); +-} +- + static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) + { +@@ -101,9 +81,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, + u32 state = states[idx]; + + if (state & SBI_HSM_SUSP_NON_RET_BIT) +- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state); ++ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state); + else +- return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend, ++ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, + idx, state); + } + +@@ -134,7 +114,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, + else + state = states[idx]; + +- ret = sbi_suspend(state) ? -1 : idx; ++ ret = riscv_sbi_hart_suspend(state) ? -1 : idx; + + ct_cpuidle_exit(); + +@@ -207,17 +187,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = { + { }, + }; + +-static bool sbi_suspend_state_is_valid(u32 state) +-{ +- if (state > SBI_HSM_SUSPEND_RET_DEFAULT && +- state < SBI_HSM_SUSPEND_RET_PLATFORM) +- return false; +- if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && +- state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) +- return false; +- return true; +-} +- + static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) + { + int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state); +@@ -227,7 +196,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) + return err; + } + +- if (!sbi_suspend_state_is_valid(*state)) { ++ if (!riscv_sbi_suspend_state_is_valid(*state)) { + pr_warn("Invalid SBI suspend state %#x\n", *state); + return -EINVAL; + } +@@ -600,16 +569,8 @@ static int __init sbi_cpuidle_init(void) + int ret; + struct platform_device *pdev; + +- /* +- * The SBI HSM suspend function is only available when: +- * 1) SBI version is 0.3 or higher +- * 2) SBI HSM extension is available +- */ +- if ((sbi_spec_version < sbi_mk_version(0, 3)) || +- !sbi_probe_extension(SBI_EXT_HSM)) { +- pr_info("HSM suspend not available\n"); ++ if (!riscv_sbi_hsm_is_supported()) + return 0; +- } + + ret = platform_driver_register(&sbi_cpuidle_driver); + if (ret) +diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig +index e36506471a4f..83510ecf37c3 100644 +--- a/drivers/dma/Kconfig ++++ b/drivers/dma/Kconfig +@@ -489,6 +489,13 @@ config OWL_DMA + help + Enable support for the Actions Semi Owl SoCs DMA controller. + ++config SPACEMIT_K1_DMA ++ bool "Spacemit k1 SoCs DMA support" ++ depends on SOC_SPACEMIT_K1X ++ depends on DMA_ENGINE ++ help ++ Enable support for the Spacemit k1 SoCs DMA controller. 
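The SPACEMIT_K1_DMA entry above builds the dmaengine provider added below (drivers/dma/spacemit-k1-dma.c). For orientation, here is a hedged consumer-side sketch, not part of this patch: a peripheral driver would typically drive one of these channels through the standard dmaengine API roughly as follows (the "rx" name, FIFO address and burst size are placeholders; the channel itself is resolved through the consumer's "dmas"/"dma-names" device-tree properties, which the new driver's mmp_pdma_dma_xlate() serves).

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_start_rx(struct device *dev, dma_addr_t fifo_addr,
			    dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* Looked up via the consumer's "dmas"/"dma-names" properties. */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto release;
	}

	dmaengine_submit(desc);
	/* Ends up in mmp_pdma_issue_pending(): pending list -> running list. */
	dma_async_issue_pending(chan);
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}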
++ + config PCH_DMA + tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" + depends on PCI && (X86_32 || COMPILE_TEST) +diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile +index 83553a97a010..2c9e8b993375 100644 +--- a/drivers/dma/Makefile ++++ b/drivers/dma/Makefile +@@ -66,6 +66,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ + obj-$(CONFIG_PXA_DMA) += pxa_dma.o + obj-$(CONFIG_RENESAS_DMA) += sh/ + obj-$(CONFIG_SF_PDMA) += sf-pdma/ ++obj-$(CONFIG_SPACEMIT_K1_DMA) += spacemit-k1-dma.o + obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o + obj-$(CONFIG_STM32_DMA) += stm32-dma.o + obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index 72fb40de58b3..2235a15930bc 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -28803,10 +51687,1540 @@ index 834ae519c15d..f2b299c23b1e 100644 if (rc) return rc; +diff --git a/drivers/dma/spacemit-k1-dma.c b/drivers/dma/spacemit-k1-dma.c +new file mode 100644 +index 000000000000..d730ad085e0b +--- /dev/null ++++ b/drivers/dma/spacemit-k1-dma.c +@@ -0,0 +1,1515 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * mmp dma controller driver ++ * Copyright 2012 Marvell International Ltd. ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "dmaengine.h" ++ ++#define DDADRH(n) (0x0300 + ((n) << 4)) ++#define DSADRH(n) (0x0304 + ((n) << 4)) ++#define DTADRH(n) (0x0308 + ((n) << 4)) ++#define DCSR_LPAEEN BIT(21) ++#define DRCMR_INVALID 100 ++#define DCMD_BURST64 (4 << 16) ++ ++#define DCSR 0x0000 ++#define DALGN 0x00a0 ++#define DINT 0x00f0 ++#define DDADR 0x0200 ++#define DSADR(n) (0x0204 + ((n) << 4)) ++#define DTADR(n) (0x0208 + ((n) << 4)) ++#define DCMD 0x020c ++ ++#define DCSR_RUN BIT(31) /* Run Bit (read / write) */ ++#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ ++#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */ ++#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ ++#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ ++#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ ++#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ ++#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ ++ ++#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */ ++#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ ++#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ ++#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ ++#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ ++#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ ++#define DCSR_EORINTR BIT(9) /* The end of Receive */ ++ ++#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) ++#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ ++#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ ++ ++#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ ++#define DDADR_STOP BIT(0) /* Stop (read / write) */ ++ ++#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ ++#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. 
*/ ++#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ ++#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */ ++#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ ++#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ ++#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */ ++#define DCMD_BURST8 (1 << 16) /* 8 byte burst */ ++#define DCMD_BURST16 (2 << 16) /* 16 byte burst */ ++#define DCMD_BURST32 (3 << 16) /* 32 byte burst */ ++#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ ++#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ ++#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ ++#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ ++ ++#define PDMA_MAX_DESC_BYTES DCMD_LENGTH ++#define PDMA_RESRV_CHAN_ARGS_NUM (2) /* reserved channel arguments count in dts */ ++ ++struct mmp_pdma_desc_hw { ++ u32 ddadr; /* Points to the next descriptor + flags */ ++ u32 dsadr; /* DSADR value for the current transfer */ ++ u32 dtadr; /* DTADR value for the current transfer */ ++ u32 dcmd; /* DCMD value for the current transfer */ ++ u32 ddadrh; /* Points to the next descriptor + flags */ ++ u32 dsadrh; /* DSADR value for the current transfer */ ++ u32 dtadrh; /* DTADR value for the current transfer */ ++ u32 rsvd; /* DCMD value for the current transfer */ ++} __aligned(64); ++ ++struct mmp_pdma_desc_sw { ++ struct mmp_pdma_desc_hw desc; ++ struct list_head node; ++ struct list_head tx_list; ++ struct dma_async_tx_descriptor async_tx; ++}; ++ ++struct mmp_pdma_phy; ++ ++struct mmp_pdma_chan { ++ struct device *dev; ++ struct dma_chan chan; ++ struct dma_async_tx_descriptor desc; ++ struct mmp_pdma_phy *phy; ++ enum dma_transfer_direction dir; ++ struct dma_slave_config slave_config; ++ struct mmp_pdma_desc_sw *cyclic_first; ++ ++ /* channel's basic info */ ++ struct tasklet_struct tasklet; ++ u32 dcmd; ++ u32 drcmr; ++ u32 dev_addr; ++ ++ /* list for desc */ ++ spinlock_t desc_lock; /* Descriptor list lock */ ++ struct list_head chain_pending; /* Link descriptors queue for pending */ ++ struct list_head chain_running; /* Link descriptors queue for running */ ++ bool idle; /* channel statue machine */ ++ bool byte_align; ++ ++ int user_do_qos; ++ int qos_count; /* Per-channel qos count */ ++ enum dma_status status; /* channel state machine */ ++ u32 bytes_residue; ++ ++ struct dma_pool *desc_pool; /* Descriptors pool */ ++}; ++ ++struct mmp_pdma_phy { ++ int idx; ++ void __iomem *base; ++ struct mmp_pdma_chan *vchan; ++}; ++ ++struct reserved_chan { ++ int chan_id; ++ int drcmr; ++}; ++ ++struct mmp_pdma_device { ++ int dma_channels; ++ int nr_reserved_channels; ++ struct reserved_chan *reserved_channels; ++ s32 lpm_qos; ++ struct clk *clk; ++ struct reset_control *resets; ++ int max_burst_size; ++ void __iomem *base; ++ struct device *dev; ++ struct dma_device device; ++ struct mmp_pdma_phy *phy; ++ spinlock_t phy_lock; /* protect alloc/free phy channels */ ++}; ++ ++#define tx_to_mmp_pdma_desc(tx) \ ++ container_of(tx, struct mmp_pdma_desc_sw, async_tx) ++#define to_mmp_pdma_desc(lh) \ ++ container_of(lh, struct mmp_pdma_desc_sw, node) ++#define to_mmp_pdma_chan(dchan) \ ++ container_of(dchan, struct mmp_pdma_chan, chan) ++#define to_mmp_pdma_dev(dmadev) \ ++ container_of(dmadev, struct mmp_pdma_device, device) ++ ++static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) ++{ ++ u32 ddadrh; ++ u32 reg = (phy->idx << 4) + DDADR; ++ ++ writel(addr & 0xffffffff, phy->base + reg); ++ /* config higher bits for desc address */ ++ ddadrh = (addr 
>> 32); ++ writel(ddadrh, phy->base + DDADRH(phy->idx)); ++} ++ ++static void enable_chan(struct mmp_pdma_phy *phy) ++{ ++ u32 reg, dalgn; ++ u32 dcsr; ++ unsigned long flags; ++ struct mmp_pdma_device *pdev; ++ ++ if (phy == NULL) ++ return; ++ ++ if (!phy->vchan) ++ return; ++ ++ pdev = to_mmp_pdma_dev(phy->vchan->chan.device); ++ ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ ++ reg = DRCMR(phy->vchan->drcmr); ++ writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); ++ ++ dalgn = readl(phy->base + DALGN); ++ if (phy->vchan->byte_align) ++ dalgn |= 1 << phy->idx; ++ else ++ dalgn &= ~(1 << phy->idx); ++ writel(dalgn, phy->base + DALGN); ++ ++ reg = (phy->idx << 2) + DCSR; ++ ++ dcsr = readl(phy->base + reg); ++ dcsr |= (DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN); ++ /* use long descriptor mode: set DCSR_LPAEEN bit */ ++ dcsr |= DCSR_LPAEEN; ++ writel(dcsr, phy->base + reg); ++ ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++} ++ ++static void disable_chan(struct mmp_pdma_phy *phy) ++{ ++ u32 reg; ++ u32 dcsr, cnt = 1000; ++ ++ if (!phy) ++ return; ++ ++ reg = (phy->idx << 2) + DCSR; ++ ++ dcsr = readl(phy->base + reg); ++ dcsr &= ~(DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN); ++ /* use long descriptor mode: set DCSR_LPAEEN bit */ ++ dcsr &= ~DCSR_LPAEEN; ++ writel(dcsr, phy->base + reg); ++ ++ /* ensure dma is stopped. */ ++ dcsr = readl(phy->base + reg); ++ while (!(dcsr & (0x1 << 3)) && --cnt) { ++ udelay(10); ++ dcsr = readl(phy->base + reg); ++ } ++ ++ WARN_ON(!cnt); ++} ++ ++static int clear_chan_irq(struct mmp_pdma_phy *phy) ++{ ++ u32 dcsr; ++ u32 dint = readl(phy->base + DINT); ++ u32 reg = (phy->idx << 2) + DCSR; ++ ++ if (!(dint & BIT(phy->idx))) ++ return -EAGAIN; ++ ++ /* clear irq */ ++ dcsr = readl(phy->base + reg); ++ writel(dcsr, phy->base + reg); ++ if ((dcsr & DCSR_BUSERR) && (phy->vchan)) ++ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); ++ ++ return 0; ++} ++ ++static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) ++{ ++ struct mmp_pdma_phy *phy = dev_id; ++ struct mmp_pdma_chan *pchan = phy->vchan; ++ ++ if (clear_chan_irq(phy) != 0) ++ return IRQ_NONE; ++ ++ if (pchan) ++ tasklet_schedule(&pchan->tasklet); ++ ++ return IRQ_HANDLED; ++} ++ ++static bool is_channel_reserved(struct mmp_pdma_device *pdev, int chan_id) ++{ ++ int i; ++ ++ for (i = 0; i < pdev->nr_reserved_channels; i++) { ++ if (chan_id == pdev->reserved_channels[i].chan_id) ++ return true; ++ } ++ ++ return false; ++} ++ ++static struct mmp_pdma_phy *lookup_phy_for_drcmr(struct mmp_pdma_device *pdev, int drcmr) ++{ ++ int i; ++ int chan_id; ++ struct mmp_pdma_phy *phy; ++ ++ for (i = 0; i < pdev->nr_reserved_channels; i++) { ++ if (drcmr == pdev->reserved_channels[i].drcmr) { ++ chan_id = pdev->reserved_channels[i].chan_id; ++ phy = &pdev->phy[chan_id]; ++ return phy; ++ } ++ } ++ ++ return NULL; ++} ++ ++static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) ++{ ++ struct mmp_pdma_device *pdev = dev_id; ++ struct mmp_pdma_phy *phy; ++ u32 dint = readl(pdev->base + DINT); ++ int i, ret; ++ int irq_num = 0; ++ unsigned long flags; ++ ++ while (dint) { ++ i = __ffs(dint); ++ /* only handle interrupts belonging to pdma driver*/ ++ if (i >= pdev->dma_channels) ++ break; ++ ++ dint &= (dint - 1); ++ phy = &pdev->phy[i]; ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ ++ ret = mmp_pdma_chan_handler(irq, phy); ++ ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++ if (ret == IRQ_HANDLED) ++ irq_num++; ++ } ++ ++ if (irq_num) ++ return IRQ_HANDLED; ++ ++ return IRQ_NONE; ++} ++ ++/* ++ * 
lookup free phy channel as descending priority ++ * dma channel priorities ++ * ch 0 - 3, 16 - 19 <--> (0) ++ * ch 4 - 7, 20 - 23 <--> (1) ++ * ch 8 - 11, 24 - 27 <--> (2) ++ * ch 12 - 15, 28 - 31 <--> (3) ++ */ ++static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) ++{ ++ int prio, i; ++ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); ++ struct mmp_pdma_phy *phy, *found = NULL; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ ++ phy = lookup_phy_for_drcmr(pdev, pchan->drcmr); ++ ++ if (phy != NULL) { ++ if (!phy->vchan) { ++ phy->vchan = pchan; ++ found = phy; ++ } ++ ++ goto out_unlock; ++ } ++ ++ for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { ++ for (i = 0; i < pdev->dma_channels; i++) { ++ if (prio != (i & 0xf) >> 2) ++ continue; ++ ++ if (is_channel_reserved(pdev, i)) ++ continue; ++ phy = &pdev->phy[i]; ++ if (!phy->vchan) { ++ phy->vchan = pchan; ++ found = phy; ++ goto out_unlock; ++ } ++ } ++ } ++ ++out_unlock: ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++ return found; ++} ++ ++static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan) ++{ ++ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); ++ unsigned long flags; ++ u32 reg; ++ ++ if (!pchan->phy) ++ return; ++ ++ /* clear the channel mapping in DRCMR */ ++ reg = DRCMR(pchan->drcmr); ++ writel(0, pchan->phy->base + reg); ++ ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ pchan->phy->vchan = NULL; ++ pchan->phy = NULL; ++ ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++} ++ ++/* ++ * start_pending_queue - transfer any pending transactions ++ * pending list ==> running list ++ */ ++static int start_pending_queue(struct mmp_pdma_chan *chan) ++{ ++ struct mmp_pdma_desc_sw *desc; ++ struct mmp_pdma_desc_sw *_desc; ++ ++ /* still in running, irq will start the pending list */ ++ if (chan->status == DMA_IN_PROGRESS) { ++ dev_dbg(chan->dev, "DMA controller still busy\n"); ++ return -1; ++ } ++ ++ if (list_empty(&chan->chain_pending)) { ++ /* chance to re-fetch phy channel with higher prio */ ++ mmp_pdma_free_phy(chan); ++ dev_dbg(chan->dev, "no pending list\n"); ++ ++ return -1; ++ } ++ ++ if (!chan->phy) { ++ chan->phy = lookup_phy(chan); ++ if (!chan->phy) { ++ dev_dbg(chan->dev, "no free dma channel\n"); ++ ++ return -1; ++ } ++ } ++ ++ /* ++ * pending -> running ++ * reintilize pending list ++ */ ++ list_for_each_entry_safe(desc, _desc, &chan->chain_pending, node) { ++ list_del(&desc->node); ++ list_add_tail(&desc->node, &chan->chain_running); ++ if (desc->desc.ddadr & DDADR_STOP) ++ break; ++ } ++ ++ desc = list_first_entry(&chan->chain_running, ++ struct mmp_pdma_desc_sw, node); ++ ++ /* ++ * Program the descriptor's address into the DMA controller, ++ * then start the DMA transaction ++ */ ++ set_desc(chan->phy, desc->async_tx.phys); ++ enable_chan(chan->phy); ++ chan->idle = false; ++ chan->status = DMA_IN_PROGRESS; ++ chan->bytes_residue = 0; ++ return 0; ++} ++ ++/* desc->tx_list ==> pending list */ ++static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); ++ struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); ++ struct mmp_pdma_desc_sw *child; ++ unsigned long flags; ++ dma_cookie_t cookie = -EBUSY; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ++ list_for_each_entry(child, &desc->tx_list, node) { ++ cookie = dma_cookie_assign(&child->async_tx); ++ } ++ ++ /* softly link to pending list - desc->tx_list ==> pending 
list */ ++ list_splice_tail_init(&desc->tx_list, &chan->chain_pending); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ return cookie; ++} ++ ++static int mmp_pdma_config_write(struct dma_chan *dchan, ++ struct dma_slave_config *cfg, ++ enum dma_transfer_direction direction) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ u32 maxburst = 0, addr = 0; ++ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; ++ ++ if (!dchan) ++ return -EINVAL; ++ ++ if (direction == DMA_DEV_TO_MEM) { ++ chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; ++ maxburst = cfg->src_maxburst; ++ width = cfg->src_addr_width; ++ addr = cfg->src_addr; ++ } else if (direction == DMA_MEM_TO_DEV) { ++ chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; ++ maxburst = cfg->dst_maxburst; ++ width = cfg->dst_addr_width; ++ addr = cfg->dst_addr; ++ } ++ ++ if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) ++ chan->dcmd |= DCMD_WIDTH1; ++ else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) ++ chan->dcmd |= DCMD_WIDTH2; ++ else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) ++ chan->dcmd |= DCMD_WIDTH4; ++ ++ if (maxburst == 8) ++ chan->dcmd |= DCMD_BURST8; ++ else if (maxburst == 16) ++ chan->dcmd |= DCMD_BURST16; ++ else if (maxburst == 32) ++ chan->dcmd |= DCMD_BURST32; ++ ++ chan->dir = direction; ++ chan->dev_addr = addr; ++ ++ return 0; ++} ++ ++static struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) ++{ ++ struct mmp_pdma_desc_sw *desc; ++ dma_addr_t pdesc; ++ ++ desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); ++ if (!desc) { ++ dev_err(chan->dev, "out of memory for link descriptor\n"); ++ return NULL; ++ } ++ ++ INIT_LIST_HEAD(&desc->tx_list); ++ dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); ++ /* each desc has submit */ ++ desc->async_tx.tx_submit = mmp_pdma_tx_submit; ++ desc->async_tx.phys = pdesc; ++ ++ return desc; ++} ++ ++/* ++ * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. ++ * ++ * This function will create a dma pool for descriptor allocation. ++ * Request irq only when channel is requested ++ * Return - The number of allocated descriptors. 
++ */ ++static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ ++ if (chan->desc_pool) ++ return 1; ++ ++ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), ++ chan->dev, ++ sizeof(struct mmp_pdma_desc_sw), ++ __alignof__(struct mmp_pdma_desc_sw), ++ 0); ++ if (!chan->desc_pool) { ++ dev_err(chan->dev, "unable to allocate descriptor pool\n"); ++ return -ENOMEM; ++ } ++ ++ chan->status = DMA_COMPLETE; ++ chan->dir = 0; ++ chan->dcmd = 0; ++ ++ mmp_pdma_free_phy(chan); ++ ++ chan->idle = true; ++ chan->dev_addr = 0; ++ return 1; ++} ++ ++static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, ++ struct list_head *list) ++{ ++ struct mmp_pdma_desc_sw *desc, *_desc; ++ ++ list_for_each_entry_safe(desc, _desc, list, node) { ++ list_del(&desc->node); ++ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); ++ } ++} ++ ++static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ unsigned long flags; ++ ++ /* wait until task ends if necessary */ ++ tasklet_kill(&chan->tasklet); ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ mmp_pdma_free_desc_list(chan, &chan->chain_pending); ++ mmp_pdma_free_desc_list(chan, &chan->chain_running); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ dma_pool_destroy(chan->desc_pool); ++ chan->desc_pool = NULL; ++ chan->idle = true; ++ chan->dev_addr = 0; ++ ++ chan->status = DMA_COMPLETE; ++ chan->dir = 0; ++ chan->dcmd = 0; ++ ++ mmp_pdma_free_phy(chan); ++} ++ ++/* ++ * Per-channel qos get/put function. This function ensures that pm_ ++ * runtime_get/put are not called multi times for one channel. ++ * This guarantees pm_runtime_get/put always match for the entire device. ++ */ ++static void mmp_pdma_qos_get(struct mmp_pdma_chan *chan) ++{ ++ unsigned long flags; ++ ++ if (chan->user_do_qos) ++ return; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ if (chan->qos_count == 0) { ++ chan->qos_count = 1; ++ /* ++ * Safe in spin_lock because it's marked as irq safe. ++ * Similar case for mmp_pdma_qos_put(). 
++ */ ++ pm_runtime_get_sync(chan->dev); ++ } ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++} ++ ++static void mmp_pdma_qos_put(struct mmp_pdma_chan *chan) ++{ ++ unsigned long flags; ++ ++ if (chan->user_do_qos) ++ return; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ if (chan->qos_count == 1) { ++ chan->qos_count = 0; ++ pm_runtime_put_autosuspend(chan->dev); ++ } ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++} ++ ++#define INVALID_BURST_SETTING -1 ++#define DEFAULT_MAX_BURST_SIZE 32 ++ ++static int get_max_burst_setting(unsigned int max_burst_size) ++{ ++ switch (max_burst_size) { ++ case 8: ++ return DCMD_BURST8; ++ case 16: ++ return DCMD_BURST16; ++ case 32: ++ return DCMD_BURST32; ++ case 64: ++ return DCMD_BURST64; ++ default: ++ return INVALID_BURST_SETTING; ++ } ++} ++ ++static struct dma_async_tx_descriptor * ++mmp_pdma_prep_memcpy(struct dma_chan *dchan, ++ dma_addr_t dma_dst, dma_addr_t dma_src, ++ size_t len, unsigned long flags) ++{ ++ struct mmp_pdma_chan *chan; ++ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; ++ size_t copy = 0; ++ struct mmp_pdma_device *dev; ++ int value; ++ ++ if (!dchan) ++ return NULL; ++ ++ if (!len) ++ return NULL; ++ ++ chan = to_mmp_pdma_chan(dchan); ++ chan->byte_align = false; ++ ++ if (!chan->dir) { ++ chan->dir = DMA_MEM_TO_MEM; ++ chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; ++ dev = to_mmp_pdma_dev(dchan->device); ++ value = get_max_burst_setting(dev->max_burst_size); ++ ++ WARN_ON(value == INVALID_BURST_SETTING); ++ ++ chan->dcmd |= value; ++ } ++ ++ do { ++ /* Allocate the link descriptor from DMA pool */ ++ new = mmp_pdma_alloc_descriptor(chan); ++ if (!new) { ++ dev_err(chan->dev, "no memory for desc\n"); ++ goto fail; ++ } ++ ++ copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); ++ if (dma_src & 0x7 || dma_dst & 0x7) ++ chan->byte_align = true; ++ ++ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); ++ ++ /* ++ * Check whether descriptor/source-addr/target-addr is in ++ * region higher than 4G. If so, set related higher bits to 1. 
++ */ ++ if (chan->dir == DMA_MEM_TO_DEV) { ++ new->desc.dsadr = dma_src & 0xffffffff; ++ new->desc.dtadr = dma_dst; ++ new->desc.dsadrh = (dma_src >> 32); ++ new->desc.dtadrh = 0; ++ } else if (chan->dir == DMA_DEV_TO_MEM) { ++ new->desc.dsadr = dma_src; ++ new->desc.dtadr = dma_dst & 0xffffffff; ++ new->desc.dsadrh = 0; ++ new->desc.dtadrh = (dma_dst >> 32); ++ } else if (chan->dir == DMA_MEM_TO_MEM) { ++ new->desc.dsadr = dma_src & 0xffffffff; ++ new->desc.dtadr = dma_dst & 0xffffffff; ++ new->desc.dsadrh = (dma_src >> 32); ++ new->desc.dtadrh = (dma_dst >> 32); ++ } else { ++ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir); ++ goto fail; ++ } ++ ++ if (!first) ++ first = new; ++ else { ++ prev->desc.ddadr = new->async_tx.phys; ++ prev->desc.ddadrh = (new->async_tx.phys >> 32); ++ } ++ ++ new->async_tx.cookie = 0; ++ async_tx_ack(&new->async_tx); ++ ++ prev = new; ++ len -= copy; ++ ++ if (chan->dir == DMA_MEM_TO_DEV) { ++ dma_src += copy; ++ } else if (chan->dir == DMA_DEV_TO_MEM) { ++ dma_dst += copy; ++ } else if (chan->dir == DMA_MEM_TO_MEM) { ++ dma_src += copy; ++ dma_dst += copy; ++ } ++ ++ /* Insert the link descriptor to the LD ring */ ++ list_add_tail(&new->node, &first->tx_list); ++ } while (len); ++ ++ first->async_tx.flags = flags; /* client is in control of this ack */ ++ first->async_tx.cookie = -EBUSY; ++ ++ /* last desc and fire IRQ */ ++ new->desc.ddadr = DDADR_STOP; ++ new->desc.dcmd |= DCMD_ENDIRQEN; ++ ++ chan->cyclic_first = NULL; ++ ++ return &first->async_tx; ++ ++fail: ++ if (first) ++ mmp_pdma_free_desc_list(chan, &first->tx_list); ++ return NULL; ++} ++ ++static struct dma_async_tx_descriptor * ++mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, ++ unsigned int sg_len, enum dma_transfer_direction dir, ++ unsigned long flags, void *context) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; ++ size_t len, avail; ++ struct scatterlist *sg; ++ dma_addr_t addr; ++ int i; ++ ++ if ((sgl == NULL) || (sg_len == 0)) ++ return NULL; ++ ++ chan->byte_align = true; ++ ++ mmp_pdma_config_write(dchan, &chan->slave_config, dir); ++ ++ for_each_sg(sgl, sg, sg_len, i) { ++ addr = sg_dma_address(sg); ++ avail = sg_dma_len(sgl); ++ ++ do { ++ len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); ++ if (addr & 0x7) ++ chan->byte_align = true; ++ ++ /* allocate and populate the descriptor */ ++ new = mmp_pdma_alloc_descriptor(chan); ++ if (!new) { ++ dev_err(chan->dev, "no memory for desc\n"); ++ goto fail; ++ } ++ ++ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); ++ ++ /* ++ * Check whether descriptor/source-addr/target-addr is in ++ * region higher than 4G. If so, set related higher bits to 1. 
++ */ ++ if (dir == DMA_MEM_TO_DEV) { ++ new->desc.dsadr = addr & 0xffffffff; ++ new->desc.dtadr = chan->dev_addr; ++ new->desc.dsadrh = (addr >> 32); ++ new->desc.dtadrh = 0; ++ } else if (dir == DMA_DEV_TO_MEM) { ++ new->desc.dsadr = chan->dev_addr; ++ new->desc.dtadr = addr & 0xffffffff; ++ new->desc.dsadrh = 0; ++ new->desc.dtadrh = (addr >> 32); ++ } else { ++ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir); ++ goto fail; ++ } ++ ++ if (!first) ++ first = new; ++ else { ++ prev->desc.ddadr = new->async_tx.phys; ++ prev->desc.ddadrh = (new->async_tx.phys >> 32); ++ } ++ ++ new->async_tx.cookie = 0; ++ async_tx_ack(&new->async_tx); ++ prev = new; ++ ++ /* Insert the link descriptor to the LD ring */ ++ list_add_tail(&new->node, &first->tx_list); ++ ++ /* update metadata */ ++ addr += len; ++ avail -= len; ++ } while (avail); ++ } ++ ++ first->async_tx.cookie = -EBUSY; ++ first->async_tx.flags = flags; ++ ++ /* last desc and fire IRQ */ ++ new->desc.ddadr = DDADR_STOP; ++ new->desc.dcmd |= DCMD_ENDIRQEN; ++ ++ chan->dir = dir; ++ chan->cyclic_first = NULL; ++ ++ return &first->async_tx; ++ ++fail: ++ if (first) ++ mmp_pdma_free_desc_list(chan, &first->tx_list); ++ return NULL; ++} ++ ++static struct dma_async_tx_descriptor * ++mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, ++ dma_addr_t buf_addr, size_t len, size_t period_len, ++ enum dma_transfer_direction direction, ++ unsigned long flags) ++{ ++ struct mmp_pdma_chan *chan; ++ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; ++ dma_addr_t dma_src, dma_dst; ++ dma_addr_t dma_srch, dma_dsth; ++ ++ if (!dchan || !len || !period_len) ++ return NULL; ++ ++ /* the buffer length must be a multiple of period_len */ ++ if (len % period_len != 0) ++ return NULL; ++ ++ if (period_len > PDMA_MAX_DESC_BYTES) ++ return NULL; ++ ++ chan = to_mmp_pdma_chan(dchan); ++ mmp_pdma_config_write(dchan, &chan->slave_config, direction); ++ ++ switch (direction) { ++ case DMA_MEM_TO_DEV: ++ dma_src = buf_addr & 0xffffffff; ++ dma_dst = chan->dev_addr; ++ dma_srch = (buf_addr >> 32); ++ dma_dsth = 0; ++ break; ++ case DMA_DEV_TO_MEM: ++ dma_dst = buf_addr & 0xffffffff; ++ dma_src = chan->dev_addr; ++ dma_dsth = (buf_addr >> 32); ++ dma_srch = 0; ++ break; ++ default: ++ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n"); ++ return NULL; ++ } ++ ++ chan->dir = direction; ++ ++ do { ++ /* Allocate the link descriptor from DMA pool */ ++ new = mmp_pdma_alloc_descriptor(chan); ++ if (!new) { ++ dev_err(chan->dev, "no memory for desc\n"); ++ goto fail; ++ } ++ ++ new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | ++ (DCMD_LENGTH & period_len)); ++ new->desc.dsadr = dma_src; ++ new->desc.dtadr = dma_dst; ++ new->desc.dsadrh = dma_dsth; ++ new->desc.dtadrh = dma_srch; ++ ++ if (!first) ++ first = new; ++ else { ++ prev->desc.ddadr = new->async_tx.phys; ++ prev->desc.ddadrh = (new->async_tx.phys >> 32); ++ } ++ ++ new->async_tx.cookie = 0; ++ async_tx_ack(&new->async_tx); ++ ++ prev = new; ++ len -= period_len; ++ ++ if (chan->dir == DMA_MEM_TO_DEV) ++ dma_src += period_len; ++ else ++ dma_dst += period_len; ++ ++ /* Insert the link descriptor to the LD ring */ ++ list_add_tail(&new->node, &first->tx_list); ++ } while (len); ++ ++ first->async_tx.flags = flags; /* client is in control of this ack */ ++ first->async_tx.cookie = -EBUSY; ++ ++ /* make the cyclic link */ ++ new->desc.ddadr = first->async_tx.phys; ++ chan->cyclic_first = first; ++ ++ return &first->async_tx; ++ ++fail: ++ if (first) ++ mmp_pdma_free_desc_list(chan, 
&first->tx_list); ++ return NULL; ++} ++ ++static int mmp_pdma_pause_chan(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ ++ if (!chan->phy) ++ return -1; ++ ++ disable_chan(chan->phy); ++ chan->status = DMA_PAUSED; ++ ++ return 0; ++} ++ ++static int mmp_pdma_config(struct dma_chan *dchan, ++ struct dma_slave_config *cfg) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ ++ memcpy(&chan->slave_config, cfg, sizeof(*cfg)); ++ return 0; ++} ++ ++static int mmp_pdma_terminate_all(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ unsigned long flags; ++ ++ if (!dchan) ++ return -EINVAL; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ disable_chan(chan->phy); ++ chan->status = DMA_COMPLETE; ++ mmp_pdma_free_phy(chan); ++ ++ mmp_pdma_free_desc_list(chan, &chan->chain_pending); ++ mmp_pdma_free_desc_list(chan, &chan->chain_running); ++ chan->bytes_residue = 0; ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ chan->idle = true; ++ ++ mmp_pdma_qos_put(chan); ++ ++ return 0; ++} ++ ++static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan, ++ dma_cookie_t cookie) ++{ ++ struct mmp_pdma_desc_sw *sw; ++ u32 curr, residue = 0; ++ bool passed = false; ++ bool cyclic = chan->cyclic_first != NULL; ++ ++ /* ++ * If the channel does not have a phy pointer anymore, it has already ++ * been completed. Therefore, its residue is 0. ++ */ ++ if (!chan->phy) ++ return chan->bytes_residue; /* special case for EORIRQEN */ ++ ++ if (chan->dir == DMA_DEV_TO_MEM) ++ curr = readl(chan->phy->base + DTADR(chan->phy->idx)); ++ else ++ curr = readl(chan->phy->base + DSADR(chan->phy->idx)); ++ ++ list_for_each_entry(sw, &chan->chain_running, node) { ++ u32 start, end, len; ++ ++ if (chan->dir == DMA_DEV_TO_MEM) ++ start = sw->desc.dtadr; ++ else ++ start = sw->desc.dsadr; ++ ++ len = sw->desc.dcmd & DCMD_LENGTH; ++ end = start + len; ++ ++ /* ++ * 'passed' will be latched once we found the descriptor which ++ * lies inside the boundaries of the curr pointer. All ++ * descriptors that occur in the list _after_ we found that ++ * partially handled descriptor are still to be processed and ++ * are hence added to the residual bytes counter. ++ */ ++ if (passed) { ++ residue += len; ++ } else if (curr >= start && curr <= end) { ++ residue += end - curr; ++ passed = true; ++ } ++ ++ /* ++ * Descriptors that have the ENDIRQEN bit set mark the end of a ++ * transaction chain, and the cookie assigned with it has been ++ * returned previously from mmp_pdma_tx_submit(). ++ * ++ * In case we have multiple transactions in the running chain, ++ * and the cookie does not match the one the user asked us ++ * about, reset the state variables and start over. ++ * ++ * This logic does not apply to cyclic transactions, where all ++ * descriptors have the ENDIRQEN bit set, and for which we ++ * can't have multiple transactions on one channel anyway. 
++ */ ++ if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN)) ++ continue; ++ ++ if (sw->async_tx.cookie == cookie) ++ return residue; ++ ++ residue = 0; ++ passed = false; ++ } ++ ++ /* We should only get here in case of cyclic transactions */ ++ return residue; ++} ++ ++static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, ++ dma_cookie_t cookie, ++ struct dma_tx_state *txstate) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ enum dma_status ret; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ret = dma_cookie_status(dchan, cookie, txstate); ++ if (likely(ret != DMA_ERROR)) ++ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie)); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ if (ret == DMA_COMPLETE) ++ return ret; ++ else ++ return chan->status; ++} ++ ++/* ++ * mmp_pdma_issue_pending - Issue the DMA start command ++ * pending list ==> running list ++ */ ++static void mmp_pdma_issue_pending(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ unsigned long flags; ++ int ret = 0; ++ ++ mmp_pdma_qos_get(chan); ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ret = start_pending_queue(chan); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ if (ret) ++ mmp_pdma_qos_put(chan); ++} ++ ++/* ++ * dma_do_tasklet ++ * Do call back ++ * Start pending list ++ */ ++static void dma_do_tasklet(struct tasklet_struct *t) ++{ ++ struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet); ++ struct mmp_pdma_desc_sw *desc, *_desc; ++ LIST_HEAD(chain_cleanup); ++ unsigned long flags; ++ struct dmaengine_desc_callback cb; ++ ++ int ret = 0; ++ ++ /* return if this channel has been stopped */ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ if (chan->status == DMA_COMPLETE) { ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ return; ++ } ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ if (chan->cyclic_first) { ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ desc = chan->cyclic_first; ++ dmaengine_desc_get_callback(&desc->async_tx, &cb); ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ dmaengine_desc_callback_invoke(&cb, NULL); ++ ++ return; ++ } ++ ++ /* submit pending list; callback for each desc; free desc */ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ++ /* special for the EORIRQEN case, residue is not 0 */ ++ list_for_each_entry(desc, &chan->chain_running, node) { ++ if (desc->desc.dcmd & DCMD_ENDIRQEN) { ++ chan->bytes_residue = ++ mmp_pdma_residue(chan, desc->async_tx.cookie); ++ break; ++ } ++ } ++ ++ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) { ++ /* ++ * move the descriptors to a temporary list so we can drop ++ * the lock during the entire cleanup operation ++ */ ++ list_move(&desc->node, &chain_cleanup); ++ ++ /* ++ * Look for the first list entry which has the ENDIRQEN flag ++ * set. That is the descriptor we got an interrupt for, so ++ * complete that transaction and its cookie. ++ */ ++ if (desc->desc.dcmd & DCMD_ENDIRQEN) { ++ dma_cookie_t cookie = desc->async_tx.cookie; ++ ++ dma_cookie_complete(&desc->async_tx); ++ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); ++ break; ++ } ++ } ++ ++ /* ++ * The hardware is idle and ready for more when the ++ * chain_running list is empty. ++ */ ++ chan->status = list_empty(&chan->chain_running) ? 
++ DMA_COMPLETE : DMA_IN_PROGRESS; ++ ++ /* Start any pending transactions automatically */ ++ ret = start_pending_queue(chan); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ /* restart pending transactions failed, do not need qos anymore */ ++ if (ret) ++ mmp_pdma_qos_put(chan); ++ ++ /* Run the callback for each descriptor, in order */ ++ list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { ++ struct dma_async_tx_descriptor *txd = &desc->async_tx; ++ ++ /* Remove from the list of transactions */ ++ list_del(&desc->node); ++ /* Run the link descriptor callback function */ ++ dmaengine_desc_get_callback(txd, &cb); ++ dmaengine_desc_callback_invoke(&cb, NULL); ++ ++ dma_pool_free(chan->desc_pool, desc, txd->phys); ++ } ++} ++ ++static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx) ++{ ++ struct mmp_pdma_phy *phy = &pdev->phy[idx]; ++ struct mmp_pdma_chan *chan; ++ ++ chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL); ++ if (chan == NULL) ++ return -ENOMEM; ++ ++ phy->idx = idx; ++ phy->base = pdev->base; ++ ++ spin_lock_init(&chan->desc_lock); ++ chan->dev = pdev->dev; ++ chan->chan.device = &pdev->device; ++ tasklet_setup(&chan->tasklet, dma_do_tasklet); ++ INIT_LIST_HEAD(&chan->chain_pending); ++ INIT_LIST_HEAD(&chan->chain_running); ++ ++ chan->status = DMA_COMPLETE; ++ chan->bytes_residue = 0; ++ chan->qos_count = 0; ++ chan->user_do_qos = 1; ++ ++ /* register virt channel to dma engine */ ++ list_add_tail(&chan->chan.device_node, &pdev->device.channels); ++ ++ return 0; ++} ++ ++static const struct of_device_id mmp_pdma_dt_ids[] = { ++ { .compatible = "spacemit,k1-pdma", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); ++ ++static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec, ++ struct of_dma *ofdma) ++{ ++ struct mmp_pdma_device *d = ofdma->of_dma_data; ++ struct dma_chan *chan; ++ ++ chan = dma_get_any_slave_channel(&d->device); ++ if (!chan) ++ return NULL; ++ ++ to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; ++ ++ return chan; ++} ++ ++static int mmp_pdma_probe(struct platform_device *op) ++{ ++ struct mmp_pdma_device *pdev; ++ const struct of_device_id *of_id; ++ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); ++ struct resource *iores; ++ int i, ret, value; ++ int irq = 0, dma_channels = 0; ++ const enum dma_slave_buswidth widths = ++ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | ++ DMA_SLAVE_BUSWIDTH_4_BYTES; ++ ++ int nr_reserved_channels; ++ const int *list; ++ unsigned int max_burst_size = DEFAULT_MAX_BURST_SIZE; ++ ++ pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); ++ if (!pdev) ++ return -ENOMEM; ++ ++ pdev->dev = &op->dev; ++ ++ spin_lock_init(&pdev->phy_lock); ++ ++ iores = platform_get_resource(op, IORESOURCE_MEM, 0); ++ pdev->base = devm_ioremap_resource(pdev->dev, iores); ++ if (IS_ERR(pdev->base)) ++ return PTR_ERR(pdev->base); ++ ++ pdev->clk = devm_clk_get(pdev->dev, NULL); ++ if (IS_ERR(pdev->clk)) ++ return PTR_ERR(pdev->clk); ++ ++ ret = clk_prepare_enable(pdev->clk); ++ if (ret) ++ return dev_err_probe(pdev->dev, ret, "could not enable dma bus clock\n"); ++ ++ pdev->resets = devm_reset_control_get_optional(pdev->dev, NULL); ++ if (IS_ERR(pdev->resets)) { ++ ret = PTR_ERR(pdev->resets); ++ goto err_rst; ++ } ++ ret = reset_control_deassert(pdev->resets); ++ if (ret) ++ goto err_rst; ++ ++ of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); ++ ++ if (of_id) { ++ int n; ++ ++ of_property_read_u32(pdev->dev->of_node, "#dma-channels", &dma_channels); 
++ list = of_get_property(pdev->dev->of_node, "reserved-channels", &n); ++ if (of_property_read_u32(pdev->dev->of_node, "max-burst-size", &max_burst_size)) { ++ dev_err(pdev->dev, ++ "No max-burst-size node in the device tree, set it to %d\n", ++ DEFAULT_MAX_BURST_SIZE); ++ max_burst_size = DEFAULT_MAX_BURST_SIZE; ++ } ++ ++ if (get_max_burst_setting(max_burst_size) == INVALID_BURST_SETTING) { ++ dev_err(pdev->dev, "Unsupported max-burst-size value %d set it to %d\n", ++ max_burst_size, DEFAULT_MAX_BURST_SIZE); ++ max_burst_size = DEFAULT_MAX_BURST_SIZE; ++ } ++ ++ if (list) { ++ nr_reserved_channels = n / (sizeof(u32) * PDMA_RESRV_CHAN_ARGS_NUM); ++ pdev->nr_reserved_channels = nr_reserved_channels; ++ pdev->reserved_channels = devm_kcalloc(pdev->dev, ++ nr_reserved_channels, ++ sizeof(struct reserved_chan), ++ GFP_KERNEL); ++ if (pdev->reserved_channels == NULL) { ++ ret = -ENOMEM; ++ goto err_out; ++ } ++ ++ for (i = 0; i < nr_reserved_channels; i++) { ++ of_property_read_u32_index(pdev->dev->of_node, ++ "reserved-channels", ++ i * PDMA_RESRV_CHAN_ARGS_NUM, ++ &value); ++ pdev->reserved_channels[i].chan_id = value; ++ of_property_read_u32_index(pdev->dev->of_node, ++ "reserved-channels", ++ i * PDMA_RESRV_CHAN_ARGS_NUM + 1, ++ &value); ++ pdev->reserved_channels[i].drcmr = value; ++ } ++ } ++ } else if (pdata && pdata->dma_channels) ++ dma_channels = pdata->dma_channels; ++ else ++ dma_channels = 32; ++ pdev->dma_channels = dma_channels; ++ ++ pdev->max_burst_size = max_burst_size; ++ dev_dbg(pdev->dev, "set max burst size to %d\n", max_burst_size); ++ ++ pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy), ++ GFP_KERNEL); ++ if (pdev->phy == NULL) { ++ ret = -ENOMEM; ++ goto err_out; ++ } ++ ++ INIT_LIST_HEAD(&pdev->device.channels); ++ ++ /* all chan share one irq, demux inside */ ++ irq = platform_get_irq(op, 0); ++ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, ++ IRQF_SHARED, "pdma", pdev); ++ if (ret) ++ goto err_out; ++ ++ for (i = 0; i < dma_channels; i++) { ++ ret = mmp_pdma_chan_init(pdev, i); ++ if (ret) ++ goto err_out; ++ } ++ ++ dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); ++ dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); ++ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask); ++ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask); ++ pdev->device.dev = &op->dev; ++ pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; ++ pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; ++ pdev->device.device_tx_status = mmp_pdma_tx_status; ++ pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; ++ pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; ++ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; ++ pdev->device.device_issue_pending = mmp_pdma_issue_pending; ++ pdev->device.device_config = mmp_pdma_config; ++ pdev->device.device_pause = mmp_pdma_pause_chan; ++ pdev->device.device_terminate_all = mmp_pdma_terminate_all; ++ pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; ++ pdev->device.src_addr_widths = widths; ++ pdev->device.dst_addr_widths = widths; ++ pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); ++ pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; ++ ++ dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); ++ ++ ret = dma_async_device_register(&pdev->device); ++ if (ret) { ++ dev_err(pdev->device.dev, "unable to register\n"); ++ goto err_out; ++ } ++ ++ if (op->dev.of_node) { ++ /* Device-tree DMA controller registration */ ++ ret = 
of_dma_controller_register(op->dev.of_node, ++ mmp_pdma_dma_xlate, pdev); ++ if (ret < 0) { ++ dev_err(&op->dev, "of_dma_controller_register failed\n"); ++ dma_async_device_unregister(&pdev->device); ++ goto err_out; ++ } ++ } ++ ++ platform_set_drvdata(op, pdev); ++ dev_dbg(pdev->device.dev, "initialized %d channels\n", dma_channels); ++ return 0; ++ ++err_out: ++ reset_control_assert(pdev->resets); ++err_rst: ++ clk_disable_unprepare(pdev->clk); ++ return ret; ++} ++ ++static int mmp_pdma_remove(struct platform_device *op) ++{ ++ struct mmp_pdma_device *pdev = platform_get_drvdata(op); ++ int irq = 0; ++ ++ if (op->dev.of_node) ++ of_dma_controller_free(op->dev.of_node); ++ ++ irq = platform_get_irq(op, 0); ++ devm_free_irq(&op->dev, irq, pdev); ++ ++ dma_async_device_unregister(&pdev->device); ++ ++ reset_control_assert(pdev->resets); ++ clk_disable_unprepare(pdev->clk); ++ ++ kfree(pdev->reserved_channels); ++ platform_set_drvdata(op, NULL); ++ ++ return 0; ++} ++ ++static struct platform_driver mmp_pdma_driver = { ++ .driver = { ++ .name = "spacemit-k1-pdma", ++ .of_match_table = mmp_pdma_dt_ids, ++ }, ++ .probe = mmp_pdma_probe, ++ .remove = mmp_pdma_remove, ++}; ++ ++static int __init spacemit_k1_pdma_init(void) ++{ ++ return platform_driver_register(&mmp_pdma_driver); ++} ++ ++static void __exit spacemit_k1_pdma_exit(void) ++{ ++ platform_driver_unregister(&mmp_pdma_driver); ++} ++ ++subsys_initcall(spacemit_k1_pdma_init); ++module_exit(spacemit_k1_pdma_exit); ++ ++MODULE_DESCRIPTION("Spacemit K1 Peripheral DMA Controller Driver"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig -index cc4716c037a6..a8c03e495ac4 100644 +index 0f3cd1b05ae3..d6aaccfffb01 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig +@@ -155,7 +155,7 @@ config RASPBERRYPI_FIRMWARE + + config FW_CFG_SYSFS + tristate "QEMU fw_cfg device support in sysfs" +- depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86 || SW64) ++ depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || RISCV || SPARC || SW64 || X86) + depends on HAS_IOPORT_MAP + default n + help @@ -315,5 +315,6 @@ source "drivers/firmware/psci/Kconfig" source "drivers/firmware/smccc/Kconfig" source "drivers/firmware/tegra/Kconfig" @@ -28823,6 +53237,53 @@ index 28fcddcd688f..c549817a4b42 100644 obj-y += tegra/ obj-y += xilinx/ +obj-y += xuantie/ +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile +index da8eac6dfc0f..08a44140087f 100644 +--- a/drivers/firmware/efi/libstub/Makefile ++++ b/drivers/firmware/efi/libstub/Makefile +@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ + -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ + -DEFI_HAVE_STRCMP -fno-builtin -fpic \ + $(call cc-option,-mno-single-pic-base) +-cflags-$(CONFIG_RISCV) += -fpic -mno-relax ++cflags-$(CONFIG_RISCV) += -fpic -mno-relax -DNO_ALTERNATIVE + cflags-$(CONFIG_LOONGARCH) += -fpie + + cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt +diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c +index 01f0f90ea418..fa71cd898120 100644 +--- a/drivers/firmware/efi/riscv-runtime.c ++++ b/drivers/firmware/efi/riscv-runtime.c +@@ -152,3 +152,16 @@ void arch_efi_call_virt_teardown(void) + { + efi_virtmap_unload(); + } ++ ++static int __init riscv_dmi_init(void) ++{ ++ /* ++ * On riscv, DMI depends on UEFI, and dmi_setup() needs to ++ * be called early because dmi_id_init(), which is an arch_initcall ++ * 
itself, depends on dmi_scan_machine() having been called already. ++ */ ++ dmi_setup(); ++ ++ return 0; ++} ++core_initcall(riscv_dmi_init); +diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c +index f4fea1ec3201..24b302c9e212 100644 +--- a/drivers/firmware/qemu_fw_cfg.c ++++ b/drivers/firmware/qemu_fw_cfg.c +@@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void) + + /* arch-specific ctrl & data register offsets are not available in ACPI, DT */ + #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF)) +-# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_SW64)) ++# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_SW64) || defined(CONFIG_RISCV)) + # define FW_CFG_CTRL_OFF 0x08 + # define FW_CFG_DATA_OFF 0x00 + # define FW_CFG_DMA_OFF 0x10 diff --git a/drivers/firmware/xuantie/Kconfig b/drivers/firmware/xuantie/Kconfig new file mode 100644 index 000000000000..b10c0416067f @@ -29809,6 +54270,38 @@ index 000000000000..20d216522c81 + pr_debug("th1520 proc log removed\n"); +} \ No newline at end of file +diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig +index 904b71c06eba..77d2d065a1b2 100644 +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -1828,6 +1828,15 @@ config GPIO_SIM + This enables the GPIO simulator - a configfs-based GPIO testing + driver. + ++config GPIO_K1X ++ bool "Spacemit k1x GPIO support" ++ depends on PINCTRL_SPACEMIT_K1X ++ help ++ Say yes here to support the k1x GPIO device. ++ The k1x GPIO device may have several banks, and each ++ bank control at most 32 GPIO pins. The number of banks ++ is passed by device tree or platform data. ++ + endmenu + + endif +diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile +index e44a700ec7d3..06e2c4fcb6c3 100644 +--- a/drivers/gpio/Makefile ++++ b/drivers/gpio/Makefile +@@ -78,6 +78,7 @@ obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o + obj-$(CONFIG_GPIO_IT87) += gpio-it87.o + obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o + obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o ++obj-$(CONFIG_GPIO_K1X) += gpio-k1x.o + obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o + obj-$(CONFIG_GPIO_LATCH) += gpio-latch.o + obj-$(CONFIG_GPIO_LJCA) += gpio-ljca.o diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 6b7d47a52b10..8a63ff1e5f73 100644 --- a/drivers/gpio/gpio-dwapb.c @@ -29847,11 +54340,424 @@ index 6b7d47a52b10..8a63ff1e5f73 100644 /* Only port A can provide interrupts in all configurations of the IP */ if (pp->idx == 0) +diff --git a/drivers/gpio/gpio-k1x.c b/drivers/gpio/gpio-k1x.c +new file mode 100644 +index 000000000000..4491a9ca4169 +--- /dev/null ++++ b/drivers/gpio/gpio-k1x.c +@@ -0,0 +1,407 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * spacemit-k1x gpio driver file ++ * ++ * Copyright (C) 2023 Spacemit ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define GPLR 0x0 ++#define GPDR 0xc ++#define GPSR 0x18 ++#define GPCR 0x24 ++#define GRER 0x30 ++#define GFER 0x3c ++#define GEDR 0x48 ++#define GSDR 0x54 ++#define GCDR 0x60 ++#define GSRER 0x6c ++#define GCRER 0x78 ++#define GSFER 0x84 ++#define GCFER 0x90 ++#define GAPMASK 0x9c ++#define GCPMASK 0xa8 ++ ++#define K1X_BANK_GPIO_NUMBER (32) ++#define BANK_GPIO_MASK (K1X_BANK_GPIO_NUMBER - 1) ++ ++#define k1x_gpio_to_bank_idx(gpio) ((gpio) / K1X_BANK_GPIO_NUMBER) ++#define k1x_gpio_to_bank_offset(gpio) ((gpio) & BANK_GPIO_MASK) ++#define k1x_bank_to_gpio(idx, offset) (((idx) * K1X_BANK_GPIO_NUMBER) | \ 
++ ((offset) & BANK_GPIO_MASK)) ++ ++struct k1x_gpio_bank { ++ void __iomem *reg_bank; ++ u32 irq_mask; ++ u32 irq_rising_edge; ++ u32 irq_falling_edge; ++}; ++ ++struct k1x_gpio_chip { ++ struct gpio_chip chip; ++ void __iomem *reg_base; ++ int irq; ++ struct irq_domain *domain; ++ unsigned int ngpio; ++ unsigned int nbank; ++ struct k1x_gpio_bank *banks; ++}; ++ ++static int k1x_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ ++ return irq_create_mapping(k1x_chip->domain, offset); ++} ++ ++static int k1x_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ ++ writel(bit, bank->reg_bank + GCDR); ++ ++ return 0; ++} ++ ++static int k1x_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) ++{ ++ struct k1x_gpio_chip *k1x_chip = ++ container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = ++ &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ ++ /* Set value first. */ ++ writel(bit, bank->reg_bank + (value ? GPSR : GPCR)); ++ writel(bit, bank->reg_bank + GSDR); ++ ++ return 0; ++} ++ ++static int k1x_gpio_get(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ u32 gplr; ++ ++ gplr = readl(bank->reg_bank + GPLR); ++ ++ return !!(gplr & bit); ++} ++ ++static void k1x_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ u32 gpdr; ++ ++ gpdr = readl(bank->reg_bank + GPDR); ++ /* Is it configured as output? */ ++ if (gpdr & bit) ++ writel(bit, bank->reg_bank + (value ? GPSR : GPCR)); ++} ++ ++#ifdef CONFIG_OF_GPIO ++static int k1x_gpio_of_xlate(struct gpio_chip *chip, ++ const struct of_phandle_args *gpiospec, ++ u32 *flags) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ ++ /* GPIO index start from 0. 
*/ ++ if (gpiospec->args[0] >= k1x_chip->ngpio) ++ return -EINVAL; ++ ++ if (flags) ++ *flags = gpiospec->args[1]; ++ ++ return gpiospec->args[0]; ++} ++#endif ++ ++static int k1x_gpio_irq_type(struct irq_data *d, unsigned int type) ++{ ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ int gpio = irqd_to_hwirq(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ ++ if (type & IRQ_TYPE_EDGE_RISING) { ++ bank->irq_rising_edge |= bit; ++ writel(bit, bank->reg_bank + GSRER); ++ } else { ++ bank->irq_rising_edge &= ~bit; ++ writel(bit, bank->reg_bank + GCRER); ++ } ++ ++ if (type & IRQ_TYPE_EDGE_FALLING) { ++ bank->irq_falling_edge |= bit; ++ writel(bit, bank->reg_bank + GSFER); ++ } else { ++ bank->irq_falling_edge &= ~bit; ++ writel(bit, bank->reg_bank + GCFER); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t k1x_gpio_demux_handler(int irq, void *data) ++{ ++ int i, n; ++ u32 gedr; ++ unsigned long pending = 0; ++ unsigned int irq_num, irqs_handled = 0; ++ struct k1x_gpio_bank *bank; ++ struct k1x_gpio_chip *k1x_chip = (struct k1x_gpio_chip *)data; ++ ++ for (i = 0; i < k1x_chip->nbank; i++) { ++ bank = &k1x_chip->banks[i]; ++ ++ gedr = readl(bank->reg_bank + GEDR); ++ if (!gedr) ++ continue; ++ ++ writel(gedr, bank->reg_bank + GEDR); ++ gedr = gedr & bank->irq_mask; ++ ++ if (!gedr) ++ continue; ++ pending = gedr; ++ for_each_set_bit(n, &pending, BITS_PER_LONG) { ++ irq_num = irq_find_mapping(k1x_chip->domain, ++ k1x_bank_to_gpio(i, n)); ++ generic_handle_irq(irq_num); ++ } ++ irqs_handled++; ++ } ++ ++ return irqs_handled ? IRQ_HANDLED : IRQ_NONE; ++} ++ ++static void k1x_ack_muxed_gpio(struct irq_data *d) ++{ ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ int gpio = irqd_to_hwirq(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ ++ writel(bit, bank->reg_bank + GEDR); ++} ++ ++static void k1x_mask_muxed_gpio(struct irq_data *d) ++{ ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ int gpio = irqd_to_hwirq(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ ++ bank->irq_mask &= ~bit; ++ ++ /* Clear the bit of rising and falling edge detection. */ ++ writel(bit, bank->reg_bank + GCRER); ++ writel(bit, bank->reg_bank + GCFER); ++} ++ ++static void k1x_unmask_muxed_gpio(struct irq_data *d) ++{ ++ int gpio = irqd_to_hwirq(d); ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ ++ bank->irq_mask |= bit; ++ ++ /* Set the bit of rising and falling edge detection if the gpio has. 
*/ ++ writel(bit & bank->irq_rising_edge, bank->reg_bank + GSRER); ++ writel(bit & bank->irq_falling_edge, bank->reg_bank + GSFER); ++} ++ ++static struct irq_chip k1x_muxed_gpio_chip = { ++ .name = "k1x-gpio-irqchip", ++ .irq_ack = k1x_ack_muxed_gpio, ++ .irq_mask = k1x_mask_muxed_gpio, ++ .irq_unmask = k1x_unmask_muxed_gpio, ++ .irq_set_type = k1x_gpio_irq_type, ++ .flags = IRQCHIP_SKIP_SET_WAKE, ++}; ++ ++static const struct of_device_id k1x_gpio_dt_ids[] = { ++ { .compatible = "spacemit,k1x-gpio"}, ++ {} ++}; ++ ++static int k1x_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) ++{ ++ irq_set_chip_and_handler(irq, &k1x_muxed_gpio_chip, handle_edge_irq); ++ irq_set_chip_data(irq, d->host_data); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops k1x_gpio_irq_domain_ops = { ++ .map = k1x_irq_domain_map, ++ .xlate = irq_domain_xlate_twocell, ++}; ++ ++static int k1x_gpio_probe_dt(struct platform_device *pdev, struct k1x_gpio_chip *k1x_chip) ++{ ++ u32 offset; ++ int i, nbank, ret; ++ struct device_node *child; ++ struct device_node *np = pdev->dev.of_node; ++ ++ nbank = of_get_child_count(np); ++ if (nbank == 0) ++ return -EINVAL; ++ ++ k1x_chip->banks = devm_kzalloc(&pdev->dev, ++ sizeof(*k1x_chip->banks) * nbank, ++ GFP_KERNEL); ++ if (!k1x_chip->banks) ++ return -ENOMEM; ++ ++ i = 0; ++ for_each_child_of_node(np, child) { ++ ret = of_property_read_u32(child, "reg-offset", &offset); ++ if (ret) { ++ of_node_put(child); ++ return ret; ++ } ++ k1x_chip->banks[i].reg_bank = k1x_chip->reg_base + offset; ++ i++; ++ } ++ ++ k1x_chip->nbank = nbank; ++ k1x_chip->ngpio = k1x_chip->nbank * K1X_BANK_GPIO_NUMBER; ++ ++ return 0; ++} ++ ++static int k1x_gpio_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *np; ++ struct k1x_gpio_chip *k1x_chip; ++ struct k1x_gpio_bank *bank; ++ struct resource *res; ++ struct irq_domain *domain; ++ struct clk *clk; ++ ++ int irq, i, ret; ++ void __iomem *base; ++ ++ np = pdev->dev.of_node; ++ if (!np) ++ return -EINVAL; ++ ++ k1x_chip = devm_kzalloc(dev, sizeof(*k1x_chip), GFP_KERNEL); ++ if (!k1x_chip) ++ return -ENOMEM; ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) ++ return -EINVAL; ++ base = devm_ioremap_resource(dev, res); ++ if (!base) ++ return -EINVAL; ++ ++ k1x_chip->irq = irq; ++ k1x_chip->reg_base = base; ++ ++ ret = k1x_gpio_probe_dt(pdev, k1x_chip); ++ if (ret) { ++ dev_err(dev, "Fail to initialize gpio unit, error %d.\n", ret); ++ return ret; ++ } ++ ++ clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(clk)) { ++ dev_err(dev, "Fail to get gpio clock, error %ld.\n", ++ PTR_ERR(clk)); ++ return PTR_ERR(clk); ++ } ++ ret = clk_prepare_enable(clk); ++ if (ret) { ++ dev_err(dev, "Fail to enable gpio clock, error %d.\n", ret); ++ return ret; ++ } ++ ++ domain = irq_domain_add_linear(np, k1x_chip->ngpio, &k1x_gpio_irq_domain_ops, k1x_chip); ++ if (!domain) ++ return -EINVAL; ++ ++ k1x_chip->domain = domain; ++ ++ /* Initialize the gpio chip */ ++ k1x_chip->chip.label = "k1x-gpio"; ++ k1x_chip->chip.request = gpiochip_generic_request; ++ k1x_chip->chip.free = gpiochip_generic_free; ++ k1x_chip->chip.direction_input = k1x_gpio_direction_input; ++ k1x_chip->chip.direction_output = k1x_gpio_direction_output; ++ k1x_chip->chip.get = k1x_gpio_get; ++ k1x_chip->chip.set = k1x_gpio_set; ++ k1x_chip->chip.to_irq = k1x_gpio_to_irq; ++#ifdef CONFIG_OF_GPIO ++ k1x_chip->chip.fwnode = 
of_fwnode_handle(np); ++ k1x_chip->chip.of_xlate = k1x_gpio_of_xlate; ++ k1x_chip->chip.of_gpio_n_cells = 2; ++#endif ++ k1x_chip->chip.ngpio = k1x_chip->ngpio; ++ ++ if (devm_request_irq(&pdev->dev, irq, k1x_gpio_demux_handler, 0, ++ k1x_chip->chip.label, k1x_chip)) { ++ dev_err(&pdev->dev, "failed to request high IRQ\n"); ++ ret = -ENOENT; ++ goto err; ++ } ++ ++ gpiochip_add(&k1x_chip->chip); ++ ++ /* clear all GPIO edge detects */ ++ for (i = 0; i < k1x_chip->nbank; i++) { ++ bank = &k1x_chip->banks[i]; ++ writel(0xffffffff, bank->reg_bank + GCFER); ++ writel(0xffffffff, bank->reg_bank + GCRER); ++ /* Unmask edge detection to AP. */ ++ writel(0xffffffff, bank->reg_bank + GAPMASK); ++ } ++ ++ return 0; ++err: ++ irq_domain_remove(domain); ++ return ret; ++} ++ ++static struct platform_driver k1x_gpio_driver = { ++ .probe = k1x_gpio_probe, ++ .driver = { ++ .name = "k1x-gpio", ++ .of_match_table = k1x_gpio_dt_ids, ++ }, ++}; ++ ++static int __init k1x_gpio_init(void) ++{ ++ return platform_driver_register(&k1x_gpio_driver); ++} ++subsys_initcall(k1x_gpio_init); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c -index ce9a94e33280..329d2c434161 100644 +index b882b26ab500..d502bb36434b 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c -@@ -1358,12 +1358,20 @@ static const struct of_device_id pca953x_dt_ids[] = { +@@ -1322,12 +1322,20 @@ static const struct of_device_id pca953x_dt_ids[] = { MODULE_DEVICE_TABLE(of, pca953x_dt_ids); @@ -29875,7 +54781,7 @@ index ce9a94e33280..329d2c434161 100644 .acpi_match_table = pca953x_acpi_ids, }, diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig -index a5b92adb8aff..486d14f1495a 100644 +index d1cad875d2f7..191c700fde97 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -390,6 +390,10 @@ source "drivers/gpu/drm/sprd/Kconfig" @@ -29890,7 +54796,7 @@ index a5b92adb8aff..486d14f1495a 100644 tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile -index 7ba2ec90c3f7..183e306f0c6e 100644 +index f93fd0ac8661..caff5405c5f4 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -184,6 +184,7 @@ obj-y += hisilicon/ @@ -29901,13 +54807,13 @@ index 7ba2ec90c3f7..183e306f0c6e 100644 obj-$(CONFIG_DRM_TVE200) += tve200/ obj-$(CONFIG_DRM_XEN) += xen/ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ -@@ -200,3 +201,4 @@ obj-$(CONFIG_DRM_SPRD) += sprd/ +@@ -199,3 +200,4 @@ obj-y += solomon/ + obj-$(CONFIG_DRM_SPRD) += sprd/ obj-y += loongson/ obj-$(CONFIG_DRM_PHYTIUM) += phytium/ - obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ +obj-$(CONFIG_DRM_VERISILICON) += verisilicon/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index 2f64c6871935..2d2f0714ea73 100644 +index 8136e49cb6d1..9a5b5dc210ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1109,6 +1109,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) @@ -337589,7 +362495,7 @@ index b3fffe7b5062..aa137ead5cc5 100644 tmp = pgprot_writecombine(tmp); else diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c -index 46ff9c75bb12..63a9b8d41b94 100644 +index 68b02e46c061..d7fa069480f9 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -187,7 +187,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo, @@ -337601,7 
+362507,7 @@ index 46ff9c75bb12..63a9b8d41b94 100644 res->bo = bo; man = ttm_manager_type(bo->bdev, place->mem_type); -@@ -670,17 +670,18 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, +@@ -674,17 +674,18 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, } else { iter_io->needs_unmap = true; memset(&iter_io->dmap, 0, sizeof(iter_io->dmap)); @@ -349296,11 +374202,38 @@ index 000000000000..913746dc5387 +module_platform_driver(th1520_hwspinlock_driver); + +MODULE_LICENSE("GPL v2"); +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index 585e7e4a1875..cf688f7c698e 100644 +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -303,6 +303,14 @@ config I2C_SIS96X + This driver can also be built as a module. If so, the module + will be called i2c-sis96x. + ++config I2C_SPACEMIT_K1 ++ tristate "Spacemit k1 I2C adapter" ++ help ++ Say yes if you want to use I2C interface on sapcemit k1 platform. ++ ++ This driver can also be built as a module. If so, the module will be ++ called i2c-spacemit-k1. ++ + config I2C_VIA + tristate "VIA VT82C586B" + depends on PCI diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile -index 738519b0a9cb..4f03df727b07 100644 +index 738519b0a9cb..8f0fa01f5498 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile -@@ -55,6 +55,7 @@ obj-$(CONFIG_I2C_CPM) += i2c-cpm.o +@@ -27,6 +27,7 @@ obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o + obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o + obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o + obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o ++obj-$(CONFIG_I2C_SPACEMIT_K1) += i2c-spacemit-k1.o + obj-$(CONFIG_I2C_VIA) += i2c-via.o + obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o + obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o +@@ -55,6 +56,7 @@ obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o i2c-designware-core-y := i2c-designware-common.o @@ -349415,7 +374348,7 @@ index d4909e9b1c84..22855b69d912 100644 +int i2c_dw_dt_configure(struct device *device); #endif diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c -index 579c668cb78a..6f4e38437a4e 100644 +index 042642a93cf2..7825869126c6 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -23,6 +23,7 @@ @@ -349440,7 +374373,7 @@ index 579c668cb78a..6f4e38437a4e 100644 } /* -@@ -698,6 +704,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +@@ -699,6 +705,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) dev->msg_read_idx = 0; dev->msg_err = 0; dev->status = 0; @@ -349448,7 +374381,7 @@ index 579c668cb78a..6f4e38437a4e 100644 dev->abort_source = 0; dev->rx_outstanding = 0; -@@ -713,6 +720,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +@@ -714,6 +721,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) i2c_dw_xfer_init(dev); /* Wait for tx to complete */ @@ -349465,7 +374398,7 @@ index 579c668cb78a..6f4e38437a4e 100644 if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) { dev_err(dev->dev, "controller timed out\n"); /* i2c_dw_init implicitly disables the adapter */ -@@ -759,13 +776,21 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +@@ -760,13 +777,21 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) goto done; } @@ -349491,7 +374424,7 @@ index 
579c668cb78a..6f4e38437a4e 100644 i2c_dw_release_lock(dev); done_nolock: -@@ -847,7 +872,7 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) +@@ -848,7 +873,7 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) { struct dw_i2c_dev *dev = dev_id; @@ -349500,7 +374433,7 @@ index 579c668cb78a..6f4e38437a4e 100644 regmap_read(dev->map, DW_IC_ENABLE, &enabled); regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat); -@@ -880,11 +905,20 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) +@@ -881,11 +906,20 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) * Anytime TX_ABRT is set, the contents of the tx/rx * buffers are flushed. Make sure to skip them. */ @@ -349522,7 +374455,7 @@ index 579c668cb78a..6f4e38437a4e 100644 i2c_dw_read(dev); if (stat & DW_IC_INTR_TX_EMPTY) -@@ -897,9 +931,16 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) +@@ -898,9 +932,16 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) */ tx_aborted: @@ -349541,7 +374474,7 @@ index 579c668cb78a..6f4e38437a4e 100644 else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { /* Workaround to trigger pending interrupt */ regmap_read(dev->map, DW_IC_INTR_MASK, &stat); -@@ -1021,9 +1062,27 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) +@@ -1022,9 +1063,27 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) struct i2c_adapter *adap = &dev->adapter; unsigned long irq_flags; unsigned int ic_con; @@ -349936,10 +374869,18 @@ index 000000000000..e25cc243c9bc +int i2c_dw_xfer_dma_deinit(struct dw_i2c_dev *dev); + diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c -index 11a75130a109..d712a7e418d7 100644 +index 11a75130a109..dfd49670d6d2 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c -@@ -322,6 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) +@@ -58,6 +58,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { + { "HISI02A3", 0 }, + { "HYGO0010", ACCESS_INTR_MASK }, + { "SUNW0005", MODEL_SUNWAY }, ++ { "SOPH0003", 0 }, + { } + }; + MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); +@@ -322,6 +323,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (has_acpi_companion(&pdev->dev)) i2c_dw_acpi_configure(&pdev->dev); @@ -349948,11 +374889,1564 @@ index 11a75130a109..d712a7e418d7 100644 ret = i2c_dw_validate_speed(dev); if (ret) +diff --git a/drivers/i2c/busses/i2c-spacemit-k1.c b/drivers/i2c/busses/i2c-spacemit-k1.c +new file mode 100644 +index 000000000000..dd10c9ff70cc +--- /dev/null ++++ b/drivers/i2c/busses/i2c-spacemit-k1.c +@@ -0,0 +1,1299 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Spacemit i2c driver ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "i2c-spacemit-k1.h" ++ ++static inline u32 spacemit_i2c_read_reg(struct spacemit_i2c_dev *spacemit_i2c, int reg) ++{ ++ return readl(spacemit_i2c->mapbase + reg); ++} ++ ++static inline void spacemit_i2c_write_reg(struct spacemit_i2c_dev *spacemit_i2c, int reg, u32 val) ++{ ++ writel(val, spacemit_i2c->mapbase + reg); ++} ++ ++static void spacemit_i2c_enable(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, ++ spacemit_i2c_read_reg(spacemit_i2c, REG_CR) | CR_IUE); ++} ++ ++static void spacemit_i2c_disable(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ spacemit_i2c->i2c_ctrl_reg_value = spacemit_i2c_read_reg(spacemit_i2c, REG_CR) & ~CR_IUE; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, spacemit_i2c->i2c_ctrl_reg_value); ++} ++ ++static void spacemit_i2c_flush_fifo_buffer(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ /* flush REG_WFIFO_WPTR and REG_WFIFO_RPTR */ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_WFIFO_WPTR, 0); ++ spacemit_i2c_write_reg(spacemit_i2c, REG_WFIFO_RPTR, 0); ++ ++ /* flush REG_RFIFO_WPTR and REG_RFIFO_RPTR */ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_RFIFO_WPTR, 0); ++ spacemit_i2c_write_reg(spacemit_i2c, REG_RFIFO_RPTR, 0); ++} ++ ++static void spacemit_i2c_controller_reset(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ /* i2c controller reset */ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, CR_UR); ++ udelay(5); ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, 0); ++ ++ /* set load counter register */ ++ if (spacemit_i2c->i2c_lcr) ++ spacemit_i2c_write_reg(spacemit_i2c, REG_LCR, spacemit_i2c->i2c_lcr); ++ ++ /* set wait counter register */ ++ if (spacemit_i2c->i2c_wcr) ++ spacemit_i2c_write_reg(spacemit_i2c, REG_WCR, spacemit_i2c->i2c_wcr); ++} ++ ++static void spacemit_i2c_bus_reset(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int clk_cnt = 0; ++ u32 bus_status; ++ ++ /* if bus is locked, reset unit. 
0: locked */ ++ bus_status = spacemit_i2c_read_reg(spacemit_i2c, REG_BMR); ++ if (!(bus_status & BMR_SDA) || !(bus_status & BMR_SCL)) { ++ spacemit_i2c_controller_reset(spacemit_i2c); ++ usleep_range(10, 20); ++ ++ /* check scl status again */ ++ bus_status = spacemit_i2c_read_reg(spacemit_i2c, REG_BMR); ++ if (!(bus_status & BMR_SCL)) ++ dev_alert(spacemit_i2c->dev, "unit reset failed\n"); ++ } ++ ++ while (clk_cnt < 9) { ++ /* check whether the SDA is still locked by slave */ ++ bus_status = spacemit_i2c_read_reg(spacemit_i2c, REG_BMR); ++ if (bus_status & BMR_SDA) ++ break; ++ ++ /* if still locked, send one clk to slave to request release */ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_RST_CYC, 0x1); ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, CR_RSTREQ); ++ usleep_range(20, 30); ++ clk_cnt++; ++ } ++ ++ bus_status = spacemit_i2c_read_reg(spacemit_i2c, REG_BMR); ++ if (clk_cnt >= 9 && !(bus_status & BMR_SDA)) ++ dev_alert(spacemit_i2c->dev, "bus reset clk reaches the max 9-clocks\n"); ++ else ++ dev_alert(spacemit_i2c->dev, "bus reset, send clk: %d\n", clk_cnt); ++} ++ ++static void spacemit_i2c_reset(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ spacemit_i2c_controller_reset(spacemit_i2c); ++} ++ ++static int spacemit_i2c_recover_bus_busy(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int timeout; ++ int cnt, ret = 0; ++ ++ if (spacemit_i2c->high_mode) ++ timeout = 1000; /* 1000us */ ++ else ++ timeout = 1500; /* 1500us */ ++ ++ cnt = SPACEMIT_I2C_BUS_RECOVER_TIMEOUT / timeout; ++ ++ if (likely(!(spacemit_i2c_read_reg(spacemit_i2c, REG_SR) & (SR_UB | SR_IBB)))) ++ return 0; ++ ++ /* wait unit and bus to recover idle */ ++ while (unlikely(spacemit_i2c_read_reg(spacemit_i2c, REG_SR) & (SR_UB | SR_IBB))) { ++ if (cnt-- <= 0) ++ break; ++ ++ usleep_range(timeout / 2, timeout); ++ } ++ ++ if (unlikely(cnt <= 0)) { ++ /* reset controller */ ++ spacemit_i2c_reset(spacemit_i2c); ++ ret = -EAGAIN; ++ } ++ ++ return ret; ++} ++ ++static void spacemit_i2c_check_bus_release(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ /* in case bus is not released after transfer completes */ ++ if (unlikely(spacemit_i2c_read_reg(spacemit_i2c, REG_SR) & SR_EBB)) { ++ spacemit_i2c_bus_reset(spacemit_i2c); ++ usleep_range(90, 150); ++ } ++} ++ ++static void spacemit_i2c_unit_init(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ u32 cr_val = 0; ++ ++ /* ++ * Unmask interrupt bits for all xfer mode: ++ * bus error, arbitration loss detected. ++ * For transaction complete signal, we use master stop ++ * interrupt, so we don't need to unmask CR_TXDONEIE. ++ */ ++ cr_val |= CR_BEIE | CR_ALDIE; ++ ++ if (likely(spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_INTERRUPT)) ++ /* ++ * Unmask interrupt bits for interrupt xfer mode: ++ * DBR rx full. ++ * For tx empty interrupt CR_DTEIE, we only ++ * need to enable when trigger byte transfer to start ++ * data sending. 
++ */ ++ cr_val |= CR_DRFIE; ++ ++ /* set speed bits */ ++ if (spacemit_i2c->fast_mode) ++ cr_val |= CR_MODE_FAST; ++ if (spacemit_i2c->high_mode) ++ cr_val |= CR_MODE_HIGH | CR_GPIOEN; ++ ++ /* disable response to general call */ ++ cr_val |= CR_GCD; ++ ++ /* enable SCL clock output */ ++ cr_val |= CR_SCLE; ++ ++ /* enable master stop detected */ ++ cr_val |= CR_MSDE | CR_MSDIE; ++ ++ /* disable int to use pio xfer mode*/ ++ if (unlikely(spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_PIO)) ++ cr_val &= ~(CR_ALDIE | CR_BEIE | CR_MSDIE | CR_DTEIE); ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, cr_val); ++} ++ ++static void spacemit_i2c_trigger_byte_xfer(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ u32 cr_val = spacemit_i2c_read_reg(spacemit_i2c, REG_CR); ++ ++ /* send start pulse */ ++ cr_val &= ~CR_STOP; ++ if (spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_PIO) ++ cr_val |= CR_START | CR_TB; ++ else ++ cr_val |= CR_START | CR_TB | CR_DTEIE; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, cr_val); ++} ++ ++static inline void ++spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *spacemit_i2c, u32 mask) ++{ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_SR, mask & SPACEMIT_I2C_INT_STATUS_MASK); ++} ++ ++static bool spacemit_i2c_is_last_byte_to_send(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ return (spacemit_i2c->tx_cnt == spacemit_i2c->cur_msg->len && ++ spacemit_i2c->msg_idx == spacemit_i2c->num - 1) ? true : false; ++} ++ ++static bool spacemit_i2c_is_last_byte_to_receive(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ /* ++ * if the message length is received from slave device, ++ * should at least to read out the length byte from slave. ++ */ ++ if (unlikely((spacemit_i2c->cur_msg->flags & I2C_M_RECV_LEN) && ++ !spacemit_i2c->smbus_rcv_len)) { ++ return false; ++ } else { ++ return (spacemit_i2c->rx_cnt == spacemit_i2c->cur_msg->len - 1 && ++ spacemit_i2c->msg_idx == spacemit_i2c->num - 1) ? 
true : false; ++ } ++} ++ ++static void spacemit_i2c_mark_rw_flag(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ if (spacemit_i2c->cur_msg->flags & I2C_M_RD) { ++ spacemit_i2c->is_rx = true; ++ spacemit_i2c->slave_addr_rw = ++ ((spacemit_i2c->cur_msg->addr & 0x7f) << 1) | 1; ++ } else { ++ spacemit_i2c->is_rx = false; ++ spacemit_i2c->slave_addr_rw = (spacemit_i2c->cur_msg->addr & 0x7f) << 1; ++ } ++} ++ ++static void spacemit_i2c_byte_xfer_send_master_code(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ u32 cr_val = spacemit_i2c_read_reg(spacemit_i2c, REG_CR); ++ ++ spacemit_i2c->phase = SPACEMIT_I2C_XFER_MASTER_CODE; ++ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_DBR, spacemit_i2c->master_code); ++ ++ cr_val &= ~(CR_STOP | CR_ALDIE); ++ ++ /* high mode: enable gpio to let I2C core generates SCL clock */ ++ cr_val |= CR_GPIOEN | CR_START | CR_TB | CR_DTEIE; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, cr_val); ++} ++ ++static void spacemit_i2c_byte_xfer_send_slave_addr(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ spacemit_i2c->phase = SPACEMIT_I2C_XFER_SLAVE_ADDR; ++ ++ /* write slave address to DBR for interrupt mode */ ++ spacemit_i2c_write_reg(spacemit_i2c, REG_DBR, spacemit_i2c->slave_addr_rw); ++ ++ spacemit_i2c_trigger_byte_xfer(spacemit_i2c); ++} ++ ++static int spacemit_i2c_byte_xfer(struct spacemit_i2c_dev *spacemit_i2c); ++static int spacemit_i2c_byte_xfer_next_msg(struct spacemit_i2c_dev *spacemit_i2c); ++ ++static int spacemit_i2c_byte_xfer_body(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int ret = 0; ++ u8 msglen = 0; ++ u32 cr_val = spacemit_i2c_read_reg(spacemit_i2c, REG_CR); ++ ++ cr_val &= ~(CR_TB | CR_ACKNAK | CR_STOP | CR_START); ++ spacemit_i2c->phase = SPACEMIT_I2C_XFER_BODY; ++ ++ if (spacemit_i2c->i2c_status & SR_IRF) { /* i2c receive full */ ++ /* if current is transmit mode, ignore this signal */ ++ if (!spacemit_i2c->is_rx) ++ return 0; ++ ++ /* ++ * if the message length is received from slave device, ++ * according to i2c spec, we should restrict the length size. 
++ */ ++ if (unlikely((spacemit_i2c->cur_msg->flags & I2C_M_RECV_LEN) && ++ !spacemit_i2c->smbus_rcv_len)) { ++ spacemit_i2c->smbus_rcv_len = true; ++ msglen = (u8)spacemit_i2c_read_reg(spacemit_i2c, REG_DBR); ++ if ((msglen == 0) || (msglen > I2C_SMBUS_BLOCK_MAX)) { ++ dev_err(spacemit_i2c->dev, "SMbus len out of range\n"); ++ *spacemit_i2c->msg_buf++ = 0; ++ spacemit_i2c->rx_cnt = spacemit_i2c->cur_msg->len; ++ cr_val |= CR_STOP | CR_ACKNAK; ++ cr_val |= CR_ALDIE | CR_TB; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, cr_val); ++ ++ return 0; ++ } ++ ++ *spacemit_i2c->msg_buf++ = msglen; ++ spacemit_i2c->cur_msg->len = msglen + 1; ++ spacemit_i2c->rx_cnt++; ++ } else { ++ if (spacemit_i2c->rx_cnt < spacemit_i2c->cur_msg->len) { ++ *spacemit_i2c->msg_buf++ = ++ spacemit_i2c_read_reg(spacemit_i2c, REG_DBR); ++ spacemit_i2c->rx_cnt++; ++ } ++ } ++ /* if transfer completes, ISR will handle it */ ++ if (spacemit_i2c->i2c_status & (SR_MSD | SR_ACKNAK)) ++ return 0; ++ ++ /* trigger next byte receive */ ++ if (spacemit_i2c->rx_cnt < spacemit_i2c->cur_msg->len) { ++ /* send stop pulse for last byte of last msg */ ++ if (spacemit_i2c_is_last_byte_to_receive(spacemit_i2c)) ++ cr_val |= CR_STOP | CR_ACKNAK; ++ ++ cr_val |= CR_ALDIE | CR_TB; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, cr_val); ++ } else if (spacemit_i2c->msg_idx < spacemit_i2c->num - 1) { ++ ret = spacemit_i2c_byte_xfer_next_msg(spacemit_i2c); ++ } else { ++ /* ++ * For this branch, we do nothing, here the receive ++ * transfer is already done, the master stop interrupt ++ * should be generated to complete this transaction. ++ */ ++ } ++ } else if (spacemit_i2c->i2c_status & SR_ITE) { ++ /* MSD comes with ITE */ ++ if (spacemit_i2c->i2c_status & SR_MSD) ++ return ret; ++ ++ if (spacemit_i2c->i2c_status & SR_RWM) { ++ /* if current is transmit mode, ignore this signal */ ++ if (!spacemit_i2c->is_rx) ++ return 0; ++ ++ if (spacemit_i2c_is_last_byte_to_receive(spacemit_i2c)) ++ cr_val |= CR_STOP | CR_ACKNAK; ++ ++ /* trigger next byte receive */ ++ cr_val |= CR_ALDIE | CR_TB; ++ ++ /* ++ * Mask transmit empty interrupt to avoid useless tx ++ * interrupt signal after switch to receive mode, the ++ * next expected is receive full interrupt signal. ++ */ ++ cr_val &= ~CR_DTEIE; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, cr_val); ++ } else { /* transmit mode */ ++ /* if current is receive mode, ignore this signal */ ++ if (spacemit_i2c->is_rx) ++ return 0; ++ ++ if (spacemit_i2c->tx_cnt < spacemit_i2c->cur_msg->len) { ++ spacemit_i2c_write_reg(spacemit_i2c, REG_DBR, ++ *spacemit_i2c->msg_buf++); ++ spacemit_i2c->tx_cnt++; ++ ++ /* send stop pulse for last byte of last msg */ ++ if (spacemit_i2c_is_last_byte_to_send(spacemit_i2c)) ++ cr_val |= CR_STOP; ++ ++ cr_val |= CR_ALDIE | CR_TB; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, cr_val); ++ } else if (spacemit_i2c->msg_idx < spacemit_i2c->num - 1) { ++ ret = spacemit_i2c_byte_xfer_next_msg(spacemit_i2c); ++ } else { ++ /* ++ * For this branch, we do nothing, here the ++ * sending transfer is already done, the master ++ * stop interrupt should be generated to ++ * complete this transaction. 
++ */ ++ } ++ } ++ } ++ ++ return ret; ++} ++ ++static int spacemit_i2c_byte_xfer_next_msg(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ if (spacemit_i2c->msg_idx == spacemit_i2c->num - 1) ++ return 0; ++ ++ spacemit_i2c->msg_idx++; ++ spacemit_i2c->cur_msg = spacemit_i2c->msgs + spacemit_i2c->msg_idx; ++ spacemit_i2c->msg_buf = spacemit_i2c->cur_msg->buf; ++ spacemit_i2c->rx_cnt = 0; ++ spacemit_i2c->tx_cnt = 0; ++ spacemit_i2c->i2c_err = 0; ++ spacemit_i2c->i2c_status = 0; ++ spacemit_i2c->smbus_rcv_len = false; ++ spacemit_i2c->phase = SPACEMIT_I2C_XFER_IDLE; ++ ++ spacemit_i2c_mark_rw_flag(spacemit_i2c); ++ ++ return spacemit_i2c_byte_xfer(spacemit_i2c); ++} ++ ++static void spacemit_i2c_fifo_xfer_fill_buffer(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int finish, count = 0, fill = 0; ++ u32 data = 0; ++ u32 data_buf[SPACEMIT_I2C_TX_FIFO_DEPTH * 2]; ++ int data_cnt = 0, i; ++ unsigned long flags; ++ ++ while (spacemit_i2c->msg_idx < spacemit_i2c->num) { ++ spacemit_i2c_mark_rw_flag(spacemit_i2c); ++ ++ if (spacemit_i2c->is_rx) ++ finish = spacemit_i2c->rx_cnt; ++ else ++ finish = spacemit_i2c->tx_cnt; ++ ++ /* write master code to fifo buffer */ ++ if (spacemit_i2c->high_mode && spacemit_i2c->is_xfer_start) { ++ data = spacemit_i2c->master_code; ++ data |= WFIFO_CTRL_TB | WFIFO_CTRL_START; ++ data_buf[data_cnt++] = data; ++ ++ fill += 2; ++ count = min_t(size_t, spacemit_i2c->cur_msg->len - finish, ++ SPACEMIT_I2C_TX_FIFO_DEPTH - fill); ++ } else { ++ fill += 1; ++ count = min_t(size_t, spacemit_i2c->cur_msg->len - finish, ++ SPACEMIT_I2C_TX_FIFO_DEPTH - fill); ++ } ++ ++ spacemit_i2c->is_xfer_start = false; ++ fill += count; ++ data = spacemit_i2c->slave_addr_rw; ++ data |= WFIFO_CTRL_TB | WFIFO_CTRL_START; ++ ++ /* write slave address to fifo buffer */ ++ data_buf[data_cnt++] = data; ++ ++ if (spacemit_i2c->is_rx) { ++ spacemit_i2c->rx_cnt += count; ++ ++ if (spacemit_i2c->rx_cnt == spacemit_i2c->cur_msg->len && ++ spacemit_i2c->msg_idx == spacemit_i2c->num - 1) ++ count -= 1; ++ ++ while (count > 0) { ++ data = *spacemit_i2c->msg_buf | WFIFO_CTRL_TB; ++ data_buf[data_cnt++] = data; ++ spacemit_i2c->msg_buf++; ++ count--; ++ } ++ ++ if (spacemit_i2c->rx_cnt == spacemit_i2c->cur_msg->len && ++ spacemit_i2c->msg_idx == spacemit_i2c->num - 1) { ++ data = *spacemit_i2c->msg_buf++; ++ data = spacemit_i2c->slave_addr_rw | WFIFO_CTRL_TB | ++ WFIFO_CTRL_STOP | WFIFO_CTRL_ACKNAK; ++ data_buf[data_cnt++] = data; ++ } ++ } else { ++ spacemit_i2c->tx_cnt += count; ++ if (spacemit_i2c_is_last_byte_to_send(spacemit_i2c)) ++ count -= 1; ++ ++ while (count > 0) { ++ data = *spacemit_i2c->msg_buf | WFIFO_CTRL_TB; ++ data_buf[data_cnt++] = data; ++ spacemit_i2c->msg_buf++; ++ count--; ++ } ++ if (spacemit_i2c_is_last_byte_to_send(spacemit_i2c)) { ++ data = *spacemit_i2c->msg_buf | WFIFO_CTRL_TB | ++ WFIFO_CTRL_STOP; ++ data_buf[data_cnt++] = data; ++ } ++ } ++ ++ if (spacemit_i2c->tx_cnt == spacemit_i2c->cur_msg->len || ++ spacemit_i2c->rx_cnt == spacemit_i2c->cur_msg->len) { ++ spacemit_i2c->msg_idx++; ++ if (spacemit_i2c->msg_idx == spacemit_i2c->num) ++ break; ++ ++ spacemit_i2c->cur_msg = spacemit_i2c->msgs + spacemit_i2c->msg_idx; ++ spacemit_i2c->msg_buf = spacemit_i2c->cur_msg->buf; ++ spacemit_i2c->rx_cnt = 0; ++ spacemit_i2c->tx_cnt = 0; ++ } ++ ++ if (fill == SPACEMIT_I2C_TX_FIFO_DEPTH) ++ break; ++ } ++ ++ spin_lock_irqsave(&spacemit_i2c->fifo_lock, flags); ++ for (i = 0; i < data_cnt; i++) ++ spacemit_i2c_write_reg(spacemit_i2c, REG_WFIFO, data_buf[i]); ++ 
spin_unlock_irqrestore(&spacemit_i2c->fifo_lock, flags); ++} ++ ++static void spacemit_i2c_fifo_xfer_copy_buffer(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int idx = 0, cnt = 0; ++ struct i2c_msg *msg; ++ ++ /* copy the rx FIFO buffer to msg */ ++ while (idx < spacemit_i2c->num) { ++ msg = spacemit_i2c->msgs + idx; ++ if (msg->flags & I2C_M_RD) { ++ cnt = msg->len; ++ while (cnt > 0) { ++ *(msg->buf + msg->len - cnt) ++ = spacemit_i2c_read_reg(spacemit_i2c, REG_RFIFO); ++ cnt--; ++ } ++ } ++ idx++; ++ } ++} ++ ++static int spacemit_i2c_fifo_xfer(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int ret = 0; ++ unsigned long time_left; ++ ++ spacemit_i2c_fifo_xfer_fill_buffer(spacemit_i2c); ++ ++ time_left = wait_for_completion_timeout(&spacemit_i2c->complete, ++ spacemit_i2c->timeout); ++ if (unlikely(time_left == 0)) { ++ dev_alert(spacemit_i2c->dev, "fifo transfer timeout\n"); ++ spacemit_i2c_bus_reset(spacemit_i2c); ++ ret = -ETIMEDOUT; ++ goto err_out; ++ } ++ ++ if (unlikely(spacemit_i2c->i2c_err)) { ++ ret = -1; ++ spacemit_i2c_flush_fifo_buffer(spacemit_i2c); ++ goto err_out; ++ } ++ ++ spacemit_i2c_fifo_xfer_copy_buffer(spacemit_i2c); ++ ++err_out: ++ return ret; ++} ++ ++static int spacemit_i2c_byte_xfer(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int ret = 0; ++ ++ /* i2c error occurs */ ++ if (unlikely(spacemit_i2c->i2c_err)) ++ return -1; ++ ++ if (spacemit_i2c->phase == SPACEMIT_I2C_XFER_IDLE) { ++ if (spacemit_i2c->high_mode && spacemit_i2c->is_xfer_start) ++ spacemit_i2c_byte_xfer_send_master_code(spacemit_i2c); ++ else ++ spacemit_i2c_byte_xfer_send_slave_addr(spacemit_i2c); ++ ++ spacemit_i2c->is_xfer_start = false; ++ } else if (spacemit_i2c->phase == SPACEMIT_I2C_XFER_MASTER_CODE) { ++ spacemit_i2c_byte_xfer_send_slave_addr(spacemit_i2c); ++ } else { ++ ret = spacemit_i2c_byte_xfer_body(spacemit_i2c); ++ } ++ ++ return ret; ++} ++ ++static void spacemit_i2c_print_msg_info(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int i, j, idx; ++ char printbuf[512]; ++ ++ idx = sprintf(printbuf, "msgs: %d, mode: %d", spacemit_i2c->num, ++ spacemit_i2c->xfer_mode); ++ for (i = 0; i < spacemit_i2c->num && i < sizeof(printbuf) / 128; i++) { ++ u16 len = spacemit_i2c->msgs[i].len & 0xffff; ++ ++ idx += sprintf(printbuf + idx, ", addr: %02x", ++ spacemit_i2c->msgs[i].addr); ++ idx += sprintf(printbuf + idx, ", flag: %c, len: %d", ++ spacemit_i2c->msgs[i].flags & I2C_M_RD ? 'R' : 'W', len); ++ if (!(spacemit_i2c->msgs[i].flags & I2C_M_RD)) { ++ idx += sprintf(printbuf + idx, ", data:"); ++ /* print at most ten bytes of data */ ++ for (j = 0; j < len && j < 10; j++) ++ idx += sprintf(printbuf + idx, " %02x", ++ spacemit_i2c->msgs[i].buf[j]); ++ } ++ } ++ ++} ++ ++static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ if (unlikely(spacemit_i2c->i2c_err)) { ++ dev_dbg(spacemit_i2c->dev, "i2c error status: 0x%08x\n", ++ spacemit_i2c->i2c_status); ++ if (spacemit_i2c->i2c_err & (SR_BED | SR_ALD)) ++ spacemit_i2c_reset(spacemit_i2c); ++ ++ /* try transfer again */ ++ if (spacemit_i2c->i2c_err & (SR_RXOV | SR_ALD)) { ++ spacemit_i2c_flush_fifo_buffer(spacemit_i2c); ++ return -EAGAIN; ++ } ++ return (spacemit_i2c->i2c_status & SR_ACKNAK) ? 
-ENXIO : -EIO; ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t spacemit_i2c_int_handler(int irq, void *devid) ++{ ++ struct spacemit_i2c_dev *spacemit_i2c = devid; ++ u32 status, ctrl; ++ int ret = 0; ++ ++ /* record i2c status */ ++ status = spacemit_i2c_read_reg(spacemit_i2c, REG_SR); ++ spacemit_i2c->i2c_status = status; ++ ++ /* check if a valid interrupt status */ ++ if (!status) ++ /* nothing need be done */ ++ return IRQ_HANDLED; ++ ++ /* bus error, rx overrun, arbitration lost */ ++ spacemit_i2c->i2c_err = status & (SR_BED | SR_RXOV | SR_ALD); ++ ++ /* clear interrupt status bits[31:18]*/ ++ spacemit_i2c_clear_int_status(spacemit_i2c, status); ++ ++ /* i2c error happens */ ++ if (unlikely(spacemit_i2c->i2c_err)) ++ goto err_out; ++ ++ /* process interrupt mode */ ++ if (likely(spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_INTERRUPT)) ++ ret = spacemit_i2c_byte_xfer(spacemit_i2c); ++ ++err_out: ++ /* ++ * send transaction complete signal: ++ * error happens, detect master stop ++ */ ++ if (likely(spacemit_i2c->i2c_err || (ret < 0) || (status & SR_MSD))) { ++ /* ++ * Here the transaction is already done, we don't need any ++ * other interrupt signals from now, in case any interrupt ++ * happens before spacemit_i2c_xfer to disable irq and i2c unit, ++ * we mask all the interrupt signals and clear the interrupt ++ * status. ++ */ ++ ctrl = spacemit_i2c_read_reg(spacemit_i2c, REG_CR); ++ ctrl &= ~SPACEMIT_I2C_INT_CTRL_MASK; ++ spacemit_i2c_write_reg(spacemit_i2c, REG_CR, ctrl); ++ ++ spacemit_i2c_clear_int_status(spacemit_i2c, SPACEMIT_I2C_INT_STATUS_MASK); ++ ++ complete(&spacemit_i2c->complete); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static void spacemit_i2c_choose_xfer_mode(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ unsigned long timeout; ++ int idx = 0, cnt = 0, freq; ++ bool block = false; ++ ++ /* scan msgs */ ++ if (spacemit_i2c->high_mode) ++ cnt++; ++ spacemit_i2c->rx_total = 0; ++ while (idx < spacemit_i2c->num) { ++ cnt += (spacemit_i2c->msgs + idx)->len + 1; ++ if ((spacemit_i2c->msgs + idx)->flags & I2C_M_RD) ++ spacemit_i2c->rx_total += (spacemit_i2c->msgs + idx)->len; ++ ++ /* ++ * Some SMBus transactions require that ++ * we receive the transacttion length as the first read byte. 
++ * force to use I2C_MODE_INTERRUPT ++ */ ++ if ((spacemit_i2c->msgs + idx)->flags & I2C_M_RECV_LEN) { ++ block = true; ++ cnt += I2C_SMBUS_BLOCK_MAX + 2; ++ } ++ idx++; ++ } ++ ++ spacemit_i2c->xfer_mode = SPACEMIT_I2C_MODE_INTERRUPT; ++ ++ /* calculate timeout */ ++ if (likely(spacemit_i2c->high_mode)) ++ freq = 1500000; ++ else if (likely(spacemit_i2c->fast_mode)) ++ freq = 400000; ++ else ++ freq = 100000; ++ ++ timeout = cnt * 9 * USEC_PER_SEC / freq; ++ ++ if (likely(spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_INTERRUPT || ++ spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_PIO)) ++ timeout += (cnt - 1) * 220; ++ ++ if (spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_INTERRUPT) ++ spacemit_i2c->timeout = usecs_to_jiffies(timeout + 500000); ++ else ++ spacemit_i2c->timeout = usecs_to_jiffies(timeout + 100000); ++} ++ ++static void spacemit_i2c_init_xfer_params(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ /* initialize transfer parameters */ ++ spacemit_i2c->msg_idx = 0; ++ spacemit_i2c->cur_msg = spacemit_i2c->msgs; ++ spacemit_i2c->msg_buf = spacemit_i2c->cur_msg->buf; ++ spacemit_i2c->rx_cnt = 0; ++ spacemit_i2c->tx_cnt = 0; ++ spacemit_i2c->i2c_err = 0; ++ spacemit_i2c->i2c_status = 0; ++ spacemit_i2c->phase = SPACEMIT_I2C_XFER_IDLE; ++ ++ /* only send master code once for high speed mode */ ++ spacemit_i2c->is_xfer_start = true; ++} ++ ++static int spacemit_i2c_pio_xfer(struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int ret = 0, xfer_try = 0; ++ u32 status; ++ signed long timeout; ++ ++xfer_retry: ++ /* calculate timeout */ ++ spacemit_i2c_choose_xfer_mode(spacemit_i2c); ++ spacemit_i2c->xfer_mode = SPACEMIT_I2C_MODE_PIO; ++ timeout = jiffies_to_usecs(spacemit_i2c->timeout); ++ ++ if (!spacemit_i2c->clk_always_on) ++ clk_enable(spacemit_i2c->clk); ++ spacemit_i2c_controller_reset(spacemit_i2c); ++ udelay(2); ++ ++ spacemit_i2c_unit_init(spacemit_i2c); ++ ++ spacemit_i2c_clear_int_status(spacemit_i2c, SPACEMIT_I2C_INT_STATUS_MASK); ++ ++ spacemit_i2c_init_xfer_params(spacemit_i2c); ++ ++ spacemit_i2c_mark_rw_flag(spacemit_i2c); ++ ++ spacemit_i2c_enable(spacemit_i2c); ++ ++ ret = spacemit_i2c_byte_xfer(spacemit_i2c); ++ if (unlikely(ret < 0)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ while (spacemit_i2c->num > 0 && timeout > 0) { ++ status = spacemit_i2c_read_reg(spacemit_i2c, REG_SR); ++ spacemit_i2c_clear_int_status(spacemit_i2c, status); ++ spacemit_i2c->i2c_status = status; ++ ++ /* bus error, arbitration lost */ ++ spacemit_i2c->i2c_err = status & (SR_BED | SR_ALD); ++ if (unlikely(spacemit_i2c->i2c_err)) { ++ ret = -1; ++ break; ++ } ++ ++ /* receive full */ ++ if (likely(status & SR_IRF)) { ++ ret = spacemit_i2c_byte_xfer(spacemit_i2c); ++ if (unlikely(ret < 0)) ++ break; ++ } ++ ++ /* transmit empty */ ++ if (likely(status & SR_ITE)) { ++ ret = spacemit_i2c_byte_xfer(spacemit_i2c); ++ if (unlikely(ret < 0)) ++ break; ++ } ++ ++ /* transaction done */ ++ if (likely(status & SR_MSD)) ++ break; ++ ++ udelay(10); ++ timeout -= 10; ++ } ++ ++ spacemit_i2c_disable(spacemit_i2c); ++ ++ if (!spacemit_i2c->clk_always_on) ++ clk_disable(spacemit_i2c->clk); ++ ++ if (unlikely(timeout <= 0)) { ++ dev_alert(spacemit_i2c->dev, "i2c pio transfer timeout\n"); ++ spacemit_i2c_print_msg_info(spacemit_i2c); ++ spacemit_i2c_bus_reset(spacemit_i2c); ++ udelay(100); ++ ret = -ETIMEDOUT; ++ goto out; ++ } ++ ++ /* process i2c error */ ++ if (unlikely(spacemit_i2c->i2c_err)) { ++ dev_dbg(spacemit_i2c->dev, "i2c pio error status: 0x%08x\n", ++ spacemit_i2c->i2c_status); ++ 
spacemit_i2c_print_msg_info(spacemit_i2c); ++ ++ /* try transfer again */ ++ if (spacemit_i2c->i2c_err & SR_ALD) ++ ret = -EAGAIN; ++ else ++ ret = (spacemit_i2c->i2c_status & SR_ACKNAK) ? -ENXIO : -EIO; ++ } ++ ++out: ++ xfer_try++; ++ /* retry i2c transfer 3 times for timeout and bus busy */ ++ if (unlikely((ret == -ETIMEDOUT || ret == -EAGAIN) && ++ xfer_try <= spacemit_i2c->drv_retries)) { ++ dev_alert(spacemit_i2c->dev, "i2c pio retry %d, ret %d err 0x%x\n", ++ xfer_try, ret, spacemit_i2c->i2c_err); ++ udelay(150); ++ ret = 0; ++ goto xfer_retry; ++ } ++ ++ return ret < 0 ? ret : spacemit_i2c->num; ++} ++ ++static bool spacemit_i2c_restart_notify; ++static bool spacemit_i2c_poweroff_notify; ++struct sys_off_handler *i2c_poweroff_handler; ++ ++static int spacemit_i2c_notifier_reboot_call(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ spacemit_i2c_restart_notify = true; ++ return 0; ++} ++ ++static int spacemit_i2c_notifier_poweroff_call(struct sys_off_data *data) ++{ ++ spacemit_i2c_poweroff_notify = true; ++ ++ return NOTIFY_DONE; ++} ++ ++static struct notifier_block spacemit_i2c_sys_nb = { ++ .notifier_call = spacemit_i2c_notifier_reboot_call, ++ .priority = 0, ++}; ++ ++static int spacemit_i2c_xfer(struct i2c_adapter *adapt, struct i2c_msg msgs[], int num) ++{ ++ struct spacemit_i2c_dev *spacemit_i2c = i2c_get_adapdata(adapt); ++ int ret = 0, xfer_try = 0; ++ unsigned long time_left; ++ bool clk_directly = false; ++ ++ /* ++ * at the end of system power off sequence, system will send ++ * software power down command to pmic via i2c interface ++ * with local irq disabled, so just enter PIO mode at once ++ */ ++ if (unlikely(spacemit_i2c_restart_notify == true || ++ spacemit_i2c_poweroff_notify == true)) { ++ spacemit_i2c->msgs = msgs; ++ spacemit_i2c->num = num; ++ ++ return spacemit_i2c_pio_xfer(spacemit_i2c); ++ } ++ ++ mutex_lock(&spacemit_i2c->mtx); ++ spacemit_i2c->msgs = msgs; ++ spacemit_i2c->num = num; ++ ++ if (spacemit_i2c->shutdown) { ++ mutex_unlock(&spacemit_i2c->mtx); ++ return -ENXIO; ++ } ++ ++ if (!spacemit_i2c->clk_always_on) { ++ ret = pm_runtime_get_sync(spacemit_i2c->dev); ++ if (unlikely(ret < 0)) { ++ /* ++ * during system suspend_late to system resume_early stage, ++ * if PM runtime is suspended, we will get -EACCES return ++ * value, so we need to enable clock directly, and disable after ++ * i2c transfer is finished, if PM runtime is active, it will ++ * work normally. 
During this stage, pmic onkey ISR that ++ * invoked in an irq thread may use i2c interface if we have ++ * onkey press action ++ */ ++ if (likely(ret == -EACCES)) { ++ clk_directly = true; ++ clk_enable(spacemit_i2c->clk); ++ } else { ++ dev_err(spacemit_i2c->dev, "pm runtime sync error: %d\n", ++ ret); ++ goto err_runtime; ++ } ++ } ++ } ++ ++xfer_retry: ++ /* if unit keeps the last control status, don't need to do reset */ ++ if (unlikely(spacemit_i2c_read_reg(spacemit_i2c, REG_CR) != ++ spacemit_i2c->i2c_ctrl_reg_value)) ++ /* i2c controller & bus reset */ ++ spacemit_i2c_reset(spacemit_i2c); ++ ++ /* choose transfer mode */ ++ spacemit_i2c_choose_xfer_mode(spacemit_i2c); ++ ++ /* i2c unit init */ ++ spacemit_i2c_unit_init(spacemit_i2c); ++ ++ /* clear all interrupt status */ ++ spacemit_i2c_clear_int_status(spacemit_i2c, SPACEMIT_I2C_INT_STATUS_MASK); ++ ++ spacemit_i2c_init_xfer_params(spacemit_i2c); ++ ++ spacemit_i2c_mark_rw_flag(spacemit_i2c); ++ ++ reinit_completion(&spacemit_i2c->complete); ++ ++ spacemit_i2c_enable(spacemit_i2c); ++ ++ /* i2c wait for bus busy */ ++ ret = spacemit_i2c_recover_bus_busy(spacemit_i2c); ++ if (unlikely(ret)) ++ goto timeout_xfex; ++ ++ /* i2c msg transmit */ ++ if (likely(spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_INTERRUPT)) ++ ret = spacemit_i2c_byte_xfer(spacemit_i2c); ++ else ++ ret = spacemit_i2c_fifo_xfer(spacemit_i2c); ++ ++ if (unlikely(ret < 0)) { ++ dev_dbg(spacemit_i2c->dev, "i2c transfer error\n"); ++ /* timeout error should not be overridden, and the transfer ++ * error will be confirmed by err handle function latter, ++ * the reset should be invalid argument error. ++ */ ++ if (ret != -ETIMEDOUT) ++ ret = -EINVAL; ++ goto err_xfer; ++ } ++ ++ if (likely(spacemit_i2c->xfer_mode == SPACEMIT_I2C_MODE_INTERRUPT)) { ++ time_left = wait_for_completion_timeout(&spacemit_i2c->complete, ++ spacemit_i2c->timeout); ++ if (unlikely(time_left == 0)) { ++ dev_alert(spacemit_i2c->dev, "msg completion timeout\n"); ++ spacemit_i2c_bus_reset(spacemit_i2c); ++ spacemit_i2c_reset(spacemit_i2c); ++ ret = -ETIMEDOUT; ++ goto timeout_xfex; ++ } ++ } ++ ++err_xfer: ++ if (likely(!ret)) ++ spacemit_i2c_check_bus_release(spacemit_i2c); ++ ++timeout_xfex: ++ /* disable spacemit i2c */ ++ spacemit_i2c_disable(spacemit_i2c); ++ ++ /* print more message info when error or timeout happens */ ++ if (unlikely(ret < 0 || spacemit_i2c->i2c_err)) ++ spacemit_i2c_print_msg_info(spacemit_i2c); ++ ++ /* process i2c error */ ++ if (unlikely(spacemit_i2c->i2c_err)) ++ ret = spacemit_i2c_handle_err(spacemit_i2c); ++ ++ xfer_try++; ++ /* retry i2c transfer 3 times for timeout and bus busy */ ++ if (unlikely((ret == -ETIMEDOUT || ret == -EAGAIN) && ++ xfer_try <= spacemit_i2c->drv_retries)) { ++ dev_alert(spacemit_i2c->dev, ++ "i2c transfer retry %d, ret %d mode %d err 0x%x\n", ++ xfer_try, ret, spacemit_i2c->xfer_mode, ++ spacemit_i2c->i2c_err); ++ usleep_range(150, 200); ++ ret = 0; ++ goto xfer_retry; ++ } ++ ++err_runtime: ++ if (unlikely(clk_directly)) { ++ /* if clock is enabled directly, here disable it */ ++ clk_disable(spacemit_i2c->clk); ++ } ++ ++ if (!spacemit_i2c->clk_always_on) { ++ pm_runtime_mark_last_busy(spacemit_i2c->dev); ++ pm_runtime_put_autosuspend(spacemit_i2c->dev); ++ } ++ ++ mutex_unlock(&spacemit_i2c->mtx); ++ ++ return ret < 0 ? 
ret : num; ++} ++ ++static u32 spacemit_i2c_func(struct i2c_adapter *adap) ++{ ++ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); ++} ++ ++static const struct i2c_algorithm spacemit_i2c_algrtm = { ++ .master_xfer = spacemit_i2c_xfer, ++ .functionality = spacemit_i2c_func, ++}; ++ ++static int spacemit_i2c_parse_dt(struct platform_device *pdev, ++ struct spacemit_i2c_dev *spacemit_i2c) ++{ ++ int ret; ++ struct device_node *dnode = pdev->dev.of_node; ++ ++ /* enable fast speed mode */ ++ spacemit_i2c->fast_mode = of_property_read_bool(dnode, "spacemit,i2c-fast-mode"); ++ ++ /* enable high speed mode */ ++ spacemit_i2c->high_mode = of_property_read_bool(dnode, "spacemit,i2c-high-mode"); ++ if (spacemit_i2c->high_mode) { ++ /* get master code for high speed mode */ ++ ret = of_property_read_u8(dnode, "spacemit,i2c-master-code", ++ &spacemit_i2c->master_code); ++ if (ret) { ++ spacemit_i2c->master_code = 0x0e; ++ dev_warn(spacemit_i2c->dev, ++ "failed to get i2c master code, use default: 0x0e\n"); ++ } ++ } ++ ret = of_property_read_u32(dnode, "spacemit,i2c-clk-rate", ++ &spacemit_i2c->clk_rate); ++ if (ret) { ++ dev_err(spacemit_i2c->dev, ++ "failed to get i2c clock rate\n"); ++ return ret; ++ } ++ ++ ret = of_property_read_u32(dnode, "spacemit,i2c-lcr", &spacemit_i2c->i2c_lcr); ++ if (ret) { ++ dev_err(spacemit_i2c->dev, "failed to get i2c lcr\n"); ++ return ret; ++ } ++ ++ ret = of_property_read_u32(dnode, "spacemit,i2c-wcr", &spacemit_i2c->i2c_wcr); ++ if (ret) { ++ dev_err(spacemit_i2c->dev, "failed to get i2c wcr\n"); ++ return ret; ++ } ++ ++ /* ++ * adapter device id: ++ * assigned in dt node or alias name, or automatically allocated ++ * in i2c_add_numbered_adapter() ++ */ ++ ret = of_property_read_u32(dnode, "spacemit,adapter-id", &pdev->id); ++ if (ret) ++ pdev->id = -1; ++ ++ /* default: interrupt mode */ ++ spacemit_i2c->xfer_mode = SPACEMIT_I2C_MODE_INTERRUPT; ++ ++ /* true: the clock will always on and not use runtime mechanism */ ++ spacemit_i2c->clk_always_on = of_property_read_bool(dnode, "spacemit,clk-always-on"); ++ ++ /* apb clock: 26MHz or 52MHz */ ++ ret = of_property_read_u32(dnode, "spacemit,apb_clock", &spacemit_i2c->apb_clock); ++ if (ret) { ++ dev_err(spacemit_i2c->dev, "failed to get apb clock\n"); ++ return ret; ++ } else if ((spacemit_i2c->apb_clock != SPACEMIT_I2C_APB_CLOCK_26M) && ++ (spacemit_i2c->apb_clock != SPACEMIT_I2C_APB_CLOCK_52M)) { ++ dev_err(spacemit_i2c->dev, "the apb clock should be 26M or 52M\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int spacemit_i2c_probe(struct platform_device *pdev) ++{ ++ int ret = 0; ++ struct spacemit_i2c_dev *spacemit_i2c; ++ struct device_node *dnode = pdev->dev.of_node; ++ ++ /* allocate memory */ ++ spacemit_i2c = devm_kzalloc(&pdev->dev, sizeof(struct spacemit_i2c_dev), GFP_KERNEL); ++ if (!spacemit_i2c) { ++ ret = -ENOMEM; ++ goto err_out; ++ } ++ ++ spacemit_i2c->dev = &pdev->dev; ++ platform_set_drvdata(pdev, spacemit_i2c); ++ mutex_init(&spacemit_i2c->mtx); ++ ++ spacemit_i2c->resets = devm_reset_control_get_optional(&pdev->dev, NULL); ++ if (IS_ERR(spacemit_i2c->resets)) { ++ dev_err(&pdev->dev, "failed to get resets\n"); ++ goto err_out; ++ } ++ /* reset the i2c controller */ ++ reset_control_assert(spacemit_i2c->resets); ++ udelay(200); ++ reset_control_deassert(spacemit_i2c->resets); ++ ++ ret = spacemit_i2c_parse_dt(pdev, spacemit_i2c); ++ if (ret) ++ goto err_out; ++ ++ ret = of_address_to_resource(dnode, 0, &spacemit_i2c->resrc); ++ if (ret) { ++ dev_err(&pdev->dev, 
"failed to get resource\n"); ++ ret = -ENODEV; ++ goto err_out; ++ } ++ ++ spacemit_i2c->mapbase = devm_ioremap_resource(spacemit_i2c->dev, &spacemit_i2c->resrc); ++ if (IS_ERR(spacemit_i2c->mapbase)) { ++ dev_err(&pdev->dev, "failed to do ioremap\n"); ++ ret = PTR_ERR(spacemit_i2c->mapbase); ++ goto err_out; ++ } ++ ++ spacemit_i2c->irq = platform_get_irq(pdev, 0); ++ if (spacemit_i2c->irq < 0) { ++ dev_err(spacemit_i2c->dev, "failed to get irq resource\n"); ++ ret = spacemit_i2c->irq; ++ goto err_out; ++ } ++ ++ ret = devm_request_irq(spacemit_i2c->dev, spacemit_i2c->irq, spacemit_i2c_int_handler, ++ IRQF_NO_SUSPEND, dev_name(spacemit_i2c->dev), spacemit_i2c); ++ if (ret) { ++ dev_err(spacemit_i2c->dev, "failed to request irq\n"); ++ goto err_out; ++ } ++ ++ spacemit_i2c->clk = devm_clk_get(spacemit_i2c->dev, NULL); ++ if (IS_ERR(spacemit_i2c->clk)) { ++ dev_err(spacemit_i2c->dev, "failed to get clock\n"); ++ ret = PTR_ERR(spacemit_i2c->clk); ++ goto err_out; ++ } ++ clk_prepare_enable(spacemit_i2c->clk); ++ ++ i2c_set_adapdata(&spacemit_i2c->adapt, spacemit_i2c); ++ spacemit_i2c->adapt.owner = THIS_MODULE; ++ spacemit_i2c->adapt.algo = &spacemit_i2c_algrtm; ++ spacemit_i2c->adapt.dev.parent = spacemit_i2c->dev; ++ spacemit_i2c->adapt.nr = pdev->id; ++ /* retries used by i2c framework: 3 times */ ++ spacemit_i2c->adapt.retries = 3; ++ /* ++ * retries used by i2c driver: 3 times ++ * this is for the very low occasionally PMIC i2c access failure. ++ */ ++ spacemit_i2c->drv_retries = 3; ++ spacemit_i2c->adapt.dev.of_node = dnode; ++ spacemit_i2c->adapt.algo_data = spacemit_i2c; ++ strscpy(spacemit_i2c->adapt.name, "spacemit-i2c-adapter", ++ sizeof(spacemit_i2c->adapt.name)); ++ ++ init_completion(&spacemit_i2c->complete); ++ spin_lock_init(&spacemit_i2c->fifo_lock); ++ ++ if (!spacemit_i2c->clk_always_on) { ++ pm_runtime_set_autosuspend_delay(spacemit_i2c->dev, MSEC_PER_SEC); ++ pm_runtime_use_autosuspend(spacemit_i2c->dev); ++ pm_runtime_set_active(spacemit_i2c->dev); ++ pm_suspend_ignore_children(&pdev->dev, 1); ++ pm_runtime_enable(spacemit_i2c->dev); ++ } else ++ dev_dbg(spacemit_i2c->dev, "clock keeps always on\n"); ++ ++ spacemit_i2c->shutdown = false; ++ ret = i2c_add_numbered_adapter(&spacemit_i2c->adapt); ++ if (ret) { ++ dev_err(spacemit_i2c->dev, "failed to add i2c adapter\n"); ++ goto err_clk; ++ } ++ ++ return 0; ++ ++err_clk: ++ if (!spacemit_i2c->clk_always_on) { ++ pm_runtime_disable(spacemit_i2c->dev); ++ pm_runtime_set_suspended(spacemit_i2c->dev); ++ } ++ clk_disable_unprepare(spacemit_i2c->clk); ++err_out: ++ return ret; ++} ++ ++static int spacemit_i2c_remove(struct platform_device *pdev) ++{ ++ struct spacemit_i2c_dev *spacemit_i2c = platform_get_drvdata(pdev); ++ ++ if (!spacemit_i2c->clk_always_on) { ++ pm_runtime_disable(spacemit_i2c->dev); ++ pm_runtime_set_suspended(spacemit_i2c->dev); ++ } ++ ++ i2c_del_adapter(&spacemit_i2c->adapt); ++ mutex_destroy(&spacemit_i2c->mtx); ++ reset_control_assert(spacemit_i2c->resets); ++ clk_disable_unprepare(spacemit_i2c->clk); ++ dev_dbg(spacemit_i2c->dev, "driver removed\n"); ++ ++ return 0; ++} ++ ++static const struct of_device_id spacemit_i2c_dt_match[] = { ++ { .compatible = "spacemit,k1-i2c", }, ++ {} ++}; ++ ++MODULE_DEVICE_TABLE(of, spacemit_i2c_dt_match); ++ ++static struct platform_driver spacemit_i2c_driver = { ++ .probe = spacemit_i2c_probe, ++ .remove = spacemit_i2c_remove, ++ .driver = { ++ .name = "i2c-spacemit-k1", ++ .of_match_table = spacemit_i2c_dt_match, ++ }, ++}; ++ ++static int __init 
spacemit_i2c_init(void) ++{ ++ register_restart_handler(&spacemit_i2c_sys_nb); ++ i2c_poweroff_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, ++ SYS_OFF_PRIO_HIGH, ++ spacemit_i2c_notifier_poweroff_call, ++ NULL); ++ ++ return platform_driver_register(&spacemit_i2c_driver); ++} ++ ++static void __exit spacemit_i2c_exit(void) ++{ ++ platform_driver_unregister(&spacemit_i2c_driver); ++ unregister_restart_handler(&spacemit_i2c_sys_nb); ++ unregister_sys_off_handler(i2c_poweroff_handler); ++} ++ ++subsys_initcall(spacemit_i2c_init); ++module_exit(spacemit_i2c_exit); ++ ++MODULE_DESCRIPTION("Spacemit K1 I2C Controller Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/i2c/busses/i2c-spacemit-k1.h b/drivers/i2c/busses/i2c-spacemit-k1.h +new file mode 100644 +index 000000000000..50397f8058b0 +--- /dev/null ++++ b/drivers/i2c/busses/i2c-spacemit-k1.h +@@ -0,0 +1,225 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Spacemit i2c driver header file ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++ ++#ifndef I2C_SPACEMIT_K1X_H ++#define I2C_SPACEMIT_K1X_H ++#include ++#include ++#include ++#include ++#include ++ ++/* spacemit i2c registers */ ++enum { ++ REG_CR = 0x0, /* Control Register */ ++ REG_SR = 0x4, /* Status Register */ ++ REG_SAR = 0x8, /* Slave Address Register */ ++ REG_DBR = 0xc, /* Data Buffer Register */ ++ REG_LCR = 0x10, /* Load Count Register */ ++ REG_WCR = 0x14, /* Wait Count Register */ ++ REG_RST_CYC = 0x18, /* Bus reset cycle counter */ ++ REG_BMR = 0x1c, /* Bus monitor register */ ++ REG_WFIFO = 0x20, /* Write FIFO Register */ ++ REG_WFIFO_WPTR = 0x24, /* Write FIFO Write Pointer Register */ ++ REG_WFIFO_RPTR = 0x28, /* Write FIFO Read Pointer Register */ ++ REG_RFIFO = 0x2c, /* Read FIFO Register */ ++ REG_RFIFO_WPTR = 0x30, /* Read FIFO Write Pointer Register */ ++ REG_RFIFO_RPTR = 0x34, /* Read FIFO Read Pointer Register */ ++}; ++ ++/* register REG_CR fields */ ++enum { ++ CR_START = BIT(0), /* start bit */ ++ CR_STOP = BIT(1), /* stop bit */ ++ CR_ACKNAK = BIT(2), /* send ACK(0) or NAK(1) */ ++ CR_TB = BIT(3), /* transfer byte bit */ ++ CR_TXBEGIN = BIT(4), /* transaction begin */ ++ CR_FIFOEN = BIT(5), /* enable FIFO mode */ ++ CR_GPIOEN = BIT(6), /* enable GPIO mode for SCL in HS */ ++ CR_DMAEN = BIT(7), /* enable DMA for TX and RX FIFOs */ ++ CR_MODE_FAST = BIT(8), /* bus mode (master operation) */ ++ CR_MODE_HIGH = BIT(9), /* bus mode (master operation) */ ++ CR_UR = BIT(10), /* unit reset */ ++ CR_RSTREQ = BIT(11), /* i2c bus reset request */ ++ CR_MA = BIT(12), /* master abort */ ++ CR_SCLE = BIT(13), /* master clock enable */ ++ CR_IUE = BIT(14), /* unit enable */ ++ CR_HS_STRETCH = BIT(16), /* I2C hs stretch */ ++ CR_ALDIE = BIT(18), /* enable arbitration interrupt */ ++ CR_DTEIE = BIT(19), /* enable tx interrupts */ ++ CR_DRFIE = BIT(20), /* enable rx interrupts */ ++ CR_GCD = BIT(21), /* general call disable */ ++ CR_BEIE = BIT(22), /* enable bus error ints */ ++ CR_SADIE = BIT(23), /* slave address detected int enable */ ++ CR_SSDIE = BIT(24), /* slave STOP detected int enable */ ++ CR_MSDIE = BIT(25), /* master STOP detected int enable */ ++ CR_MSDE = BIT(26), /* master STOP detected enable */ ++ CR_TXDONEIE = BIT(27), /* transaction done int enable */ ++ CR_TXEIE = BIT(28), /* transmit FIFO empty int enable */ ++ CR_RXHFIE = BIT(29), /* receive FIFO half-full int enable */ ++ CR_RXFIE = BIT(30), /* receive FIFO full int enable */ ++ CR_RXOVIE = BIT(31), /* receive FIFO overrun int enable */ ++}; ++ ++/* register REG_SR 
fields */ ++enum { ++ SR_RWM = BIT(13), /* read/write mode */ ++ SR_ACKNAK = BIT(14), /* ACK/NACK status */ ++ SR_UB = BIT(15), /* unit busy */ ++ SR_IBB = BIT(16), /* i2c bus busy */ ++ SR_EBB = BIT(17), /* early bus busy */ ++ SR_ALD = BIT(18), /* arbitration loss detected */ ++ SR_ITE = BIT(19), /* tx buffer empty */ ++ SR_IRF = BIT(20), /* rx buffer full */ ++ SR_GCAD = BIT(21), /* general call address detected */ ++ SR_BED = BIT(22), /* bus error no ACK/NAK */ ++ SR_SAD = BIT(23), /* slave address detected */ ++ SR_SSD = BIT(24), /* slave stop detected */ ++ SR_MSD = BIT(26), /* master stop detected */ ++ SR_TXDONE = BIT(27), /* transaction done */ ++ SR_TXE = BIT(28), /* tx FIFO empty */ ++ SR_RXHF = BIT(29), /* rx FIFO half-full */ ++ SR_RXF = BIT(30), /* rx FIFO full */ ++ SR_RXOV = BIT(31), /* RX FIFO overrun */ ++}; ++ ++/* register REG_LCR fields */ ++enum { ++ LCR_SLV = 0x000001FF, /* SLV: bit[8:0] */ ++ LCR_FLV = 0x0003FE00, /* FLV: bit[17:9] */ ++ LCR_HLVH = 0x07FC0000, /* HLVH: bit[26:18] */ ++ LCR_HLVL = 0xF8000000, /* HLVL: bit[31:27] */ ++}; ++ ++/* register REG_WCR fields */ ++enum { ++ WCR_COUNT = 0x0000001F, /* COUNT: bit[4:0] */ ++ WCR_COUNT1 = 0x000003E0, /* HS_COUNT1: bit[9:5] */ ++ WCR_COUNT2 = 0x00007C00, /* HS_COUNT2: bit[14:10] */ ++}; ++ ++/* register REG_BMR fields */ ++enum { ++ BMR_SDA = BIT(0), /* SDA line level */ ++ BMR_SCL = BIT(1), /* SCL line level */ ++}; ++ ++/* register REG_WFIFO fields */ ++enum { ++ WFIFO_DATA_MSK = 0x000000FF, /* data: bit[7:0] */ ++ WFIFO_CTRL_MSK = 0x000003E0, /* control: bit[11:8] */ ++ WFIFO_CTRL_START = BIT(8), /* start bit */ ++ WFIFO_CTRL_STOP = BIT(9), /* stop bit */ ++ WFIFO_CTRL_ACKNAK = BIT(10), /* send ACK(0) or NAK(1) */ ++ WFIFO_CTRL_TB = BIT(11), /* transfer byte bit */ ++}; ++ ++/* status register init value */ ++enum { ++ SPACEMIT_I2C_INT_STATUS_MASK = 0xfffc0000, /* SR bits[31:18] */ ++ SPACEMIT_I2C_INT_CTRL_MASK = (CR_ALDIE | CR_DTEIE | CR_DRFIE | ++ CR_BEIE | CR_TXDONEIE | CR_TXEIE | ++ CR_RXHFIE | CR_RXFIE | CR_RXOVIE | ++ CR_MSDIE), ++}; ++ ++/* i2c transfer mode */ ++enum spacemit_i2c_xfer_mode { ++ SPACEMIT_I2C_MODE_INTERRUPT, ++ SPACEMIT_I2C_MODE_PIO, ++ SPACEMIT_I2C_MODE_INVALID, ++}; ++ ++/* i2c transfer phase during transaction */ ++enum spacemit_i2c_xfer_phase { ++ SPACEMIT_I2C_XFER_MASTER_CODE, ++ SPACEMIT_I2C_XFER_SLAVE_ADDR, ++ SPACEMIT_I2C_XFER_BODY, ++ SPACEMIT_I2C_XFER_IDLE, ++}; ++ ++/* i2c controller FIFO depth */ ++#define SPACEMIT_I2C_RX_FIFO_DEPTH (8) ++#define SPACEMIT_I2C_TX_FIFO_DEPTH (8) ++ ++/* i2c bus recover timeout: us */ ++#define SPACEMIT_I2C_BUS_RECOVER_TIMEOUT (100000) ++ ++/* i2c bus active timeout: us */ ++#define SPACEMIT_I2C_BUS_ACTIVE_TIMEOUT (100000) ++ ++#define SPACEMIT_I2C_APB_CLOCK_26M (26000000) ++#define SPACEMIT_I2C_APB_CLOCK_52M (52000000) ++ ++/* i2c-spacemit driver's main struct */ ++struct spacemit_i2c_dev { ++ struct device *dev; ++ struct i2c_adapter adapt; ++ struct i2c_msg *msgs; ++ int num; ++ struct resource resrc; ++ struct mutex mtx; ++ spinlock_t fifo_lock; ++ int drv_retries; ++ ++ /* virtual base address mapped for register */ ++ void __iomem *mapbase; ++ ++ struct reset_control *resets; ++ struct clk *clk; ++ int irq; ++ int clk_freq_in; ++ int clk_freq_out; ++ bool clk_always_on; ++ ++ /* i2c speed mode selection */ ++ bool fast_mode; ++ bool high_mode; ++ ++ /* master code for high-speed mode */ ++ u8 master_code; ++ u32 clk_rate; ++ u32 i2c_lcr; ++ u32 i2c_wcr; ++ ++ bool shutdown; ++ ++ struct pinctrl *pinctrl; ++ struct pinctrl_state 
*pin_i2c_ap; ++ struct pinctrl_state *pin_i2c_cp; ++ struct pinctrl_state *pin_gpio; ++ ++ /* slave address with read/write flag */ ++ u32 slave_addr_rw; ++ ++ struct i2c_msg *cur_msg; ++ int msg_idx; ++ u8 *msg_buf; ++ bool is_rx; ++ size_t rx_cnt; ++ size_t tx_cnt; ++ bool is_xfer_start; ++ int rx_total; ++ bool smbus_rcv_len; ++ ++ struct completion complete; ++ u32 timeout; ++ enum spacemit_i2c_xfer_mode xfer_mode; ++ enum spacemit_i2c_xfer_phase phase; ++ u32 i2c_ctrl_reg_value; ++ u32 i2c_status; ++ u32 i2c_err; ++ ++ /* hwlock address */ ++ void __iomem *hwlock_addr; ++ ++ /* apb clock */ ++ u32 apb_clock; ++}; ++ ++#endif /* I2C_SPACEMIT_K1X_H */ diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig -index 6dee3b686eff..d8e74a1cd53a 100644 +index 6dee3b686eff..43c0bfab199a 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig -@@ -1448,4 +1448,17 @@ config XILINX_AMS +@@ -1032,6 +1032,16 @@ config SC27XX_ADC + This driver can also be built as a module. If so, the module + will be called sc27xx_adc. + ++config SPACEMIT_P1_ADC ++ tristate "Spacemit P1 ADC driver" ++ depends on MFD_SPACEMIT_P1 ++ help ++ Say yes here to have support for Spacemit p1 power management IC (PMIC) ++ ADC devices. ++ ++ To compile this driver as a module, choose M here: the module will be ++ called spacemit-p1-adc. ++ + config SPEAR_ADC + tristate "ST SPEAr ADC" + depends on PLAT_SPEAR || COMPILE_TEST +@@ -1448,4 +1458,17 @@ config XILINX_AMS The driver can also be built as a module. If so, the module will be called xilinx-ams. @@ -349971,14 +376465,306 @@ index 6dee3b686eff..d8e74a1cd53a 100644 + endmenu diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile -index 2facf979327d..f37c847c468a 100644 +index 2facf979327d..5bedd25f9187 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile -@@ -129,3 +129,4 @@ xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o +@@ -94,6 +94,7 @@ obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o + obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o + obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o + obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o ++obj-$(CONFIG_SPACEMIT_P1_ADC) += spacemit-p1-adc.o + obj-$(CONFIG_SPEAR_ADC) += spear_adc.o + obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o + obj-$(CONFIG_SUN20I_GPADC) += sun20i-gpadc-iio.o +@@ -129,3 +130,4 @@ xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o +obj-$(CONFIG_XUANTIE_TH1520_ADC) += th1520-adc.o +diff --git a/drivers/iio/adc/spacemit-p1-adc.c b/drivers/iio/adc/spacemit-p1-adc.c +new file mode 100644 +index 000000000000..03ccb6b8dd65 +--- /dev/null ++++ b/drivers/iio/adc/spacemit-p1-adc.c +@@ -0,0 +1,278 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * adc driver for Spacemit P1 ++ * ++ * Copyright (c) 2023, Spacemit Co., Ltd ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct spm_p1_adc_info { ++ int irq; ++ struct regmap *regmap; ++ struct mutex lock; ++ struct completion completion; ++}; ++ ++struct adc_match_data { ++ int nr_desc; ++ const char *name; ++ struct iio_chan_spec *iio_desc; ++}; ++ ++static struct adc_match_data *match_data; ++ ++static struct iio_chan_spec spm_p1_iio_desc[] = { ++ { ++ .indexed = 1, ++ .type = IIO_VOLTAGE, ++ .channel = 0, ++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), ++ 
}, ++ { ++ .indexed = 1, ++ .type = IIO_VOLTAGE, ++ .channel = 1, ++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), ++ }, ++ { ++ .indexed = 1, ++ .type = IIO_VOLTAGE, ++ .channel = 2, ++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), ++ }, ++ { ++ .indexed = 1, ++ .type = IIO_VOLTAGE, ++ .channel = 3, ++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), ++ }, ++ { ++ .indexed = 1, ++ .type = IIO_VOLTAGE, ++ .channel = 4, ++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), ++ }, ++ { ++ .indexed = 1, ++ .type = IIO_VOLTAGE, ++ .channel = 5, ++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), ++ }, ++}; ++ ++static struct adc_match_data spm_p1_adc_match_data = { ++ .iio_desc = spm_p1_iio_desc, ++ .nr_desc = ARRAY_SIZE(spm_p1_iio_desc), ++ .name = "spm_p1", ++}; ++ ++static const struct of_device_id spm_p1_adc_id_table[] = { ++ { .compatible = "spacemit,p1,adc", .data = (void *)&spm_p1_adc_match_data }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, spm_p1_adc_id_table); ++ ++static int spm_p1_adc_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val) ++{ ++ unsigned int value; ++ unsigned int adc_val_h, adc_val_l; ++ struct spm_p1_adc_info *info; ++ ++ info = iio_priv(indio_dev); ++ ++ mutex_lock(&info->lock); ++ ++ /* reset the ADC auto register */ ++ regmap_update_bits(info->regmap, SPM_P1_ADC_AUTO_REG, ++ SPM_P1_ADC_AUTO_BIT_MSK, 0); ++ ++ /* enable the ADC : ADC_CTRL[0] */ ++ regmap_update_bits(info->regmap, SPM_P1_ADC_CTRL_REG, ++ SPM_P1_ADC_CTRL_BIT_MSK, (1 << SPM_P1_ADC_CTRL_EN_BIT_OFFSET)); ++ ++ /* choose the channel of adc : ADC_CFG[1] */ ++ regmap_update_bits(info->regmap, SPM_P1_ADC_CFG1_REG, ++ SPM_P1_ADC_CFG1_ADC_CHNNL_SEL_BIT_MSK, ++ (chan->channel + SPM_P1_ADC_EXTERNAL_CHANNEL_OFFSET) << ++ SPM_P1_ADC_CFG1_ADC_CHNNL_SEL_BIT_OFFSET); ++ ++ /* ADC go */ ++ regmap_update_bits(info->regmap, SPM_P1_ADC_CTRL_REG, ++ SPM_P1_ADC_CTRL_BIT_MSK, (1 << SPM_P1_ADC_CTRL_GO_BIT_OFFSET) | ++ (1 << SPM_P1_ADC_CTRL_EN_BIT_OFFSET)); ++ ++ /* then wait the completion */ ++ wait_for_completion(&info->completion); ++ ++ regmap_read(info->regmap, SPM_P1_ADCIN0_RES_H_REG + chan->channel * 2, &adc_val_h); ++ regmap_read(info->regmap, SPM_P1_ADCIN0_RES_L_REG + chan->channel * 2, &adc_val_l); ++ ++ regmap_read(info->regmap, SPM_P1_VERSION_ID_REG, &value); ++ ++ *val = (adc_val_h << (ffs(SPM_P1_ADCIN0_REG_L_BIT_MSK) - 1)) | ( ++ (adc_val_l & SPM_P1_ADCIN0_REG_L_BIT_MSK) >> ++ (ffs(SPM_P1_ADCIN0_REG_L_BIT_MSK) - 1)); ++ ++ if (value == 0) { ++ /* ++ * if the version of P1 is A, the data read from the ++ * register is the inverse of the real data and the ++ * conversion accuracy of P1 is 12 bits ++ */ ++ *val = 4095 - *val; ++ } ++ ++ pr_debug("%s:%d, read channel:%d, val:%u\n", __func__, __LINE__, chan->channel, ++ *val); ++ ++ mutex_unlock(&info->lock); ++ ++ return IIO_VAL_INT; ++} ++ ++static int spm_p1_adc_scale(struct iio_chan_spec const *chan, int *val, int *val2) ++{ ++ switch (chan->type) { ++ case IIO_VOLTAGE: ++ *val = 0; ++ /* 3000 % 4095 ~ 0.7326mv */ ++ *val2 = 732600; ++ return IIO_VAL_INT_PLUS_MICRO; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ return -EINVAL; ++} ++ ++static int spm_p1_adc_read_raw(struct iio_dev *indio_dev, ++ struct iio_chan_spec const *chan, ++ int *val, int *val2, long mask) ++{ ++ switch (mask) { ++ case IIO_CHAN_INFO_RAW: ++ return spm_p1_adc_raw(indio_dev, chan, val); ++ ++ case IIO_CHAN_INFO_SCALE: ++ return spm_p1_adc_scale(chan, 
val, val2); ++ ++ default: ++ return -EINVAL; ++ } ++ ++ return -EINVAL; ++} ++ ++static const struct iio_info spm_p1_adc_iio_info = { ++ .read_raw = &spm_p1_adc_read_raw, ++}; ++ ++static irqreturn_t adc_complete_irq(int irq, void *_adc) ++{ ++ struct spm_p1_adc_info *info; ++ struct iio_dev *indio_dev = (struct iio_dev *)_adc; ++ ++ info = iio_priv(indio_dev); ++ ++ complete(&info->completion); ++ ++ return IRQ_HANDLED; ++} ++ ++static void spm_p1_adc_init(struct iio_dev *indio_dev) ++{ ++ struct spm_p1_adc_info *info = iio_priv(indio_dev); ++ ++ /* enable chop */ ++ regmap_update_bits(info->regmap, SPM_P1_ADC_CFG1_REG, SPM_P1_ADC_CFG1_ADC_CHOP_EN_BIT_MSK, ++ 1 << SPM_P1_ADC_CFG1_ADC_CHOP_EN_BIT_OFFSET); ++ ++ /* set the vref: 3v3 */ ++ regmap_update_bits(info->regmap, SPM_P1_ADC_CFG2_REG, SPM_P1_ADC_CFG2_REF_SEL_BIT_MSK, ++ SPM_P1_ADC_CFG2_3V3_REF << SPM_P1_ADC_CFG2_REF_SEL_BIT_OFFSET); ++ /* set adc deb num: 7 */ ++ regmap_update_bits(info->regmap, SPM_P1_ADC_CFG2_REG, SPM_P1_ADC_CFG2_DEB_NUM_BIT_MSK, ++ SPM_P1_ADC_CFG2_7_DEB_NUM << SPM_P1_ADC_CFG2_DEB_NUM_BIT_OFFSET); ++} ++ ++static int spm_p1_adc_probe(struct platform_device *pdev) ++{ ++ int ret; ++ struct iio_dev *indio_dev; ++ struct spm_p1_adc_info *info; ++ const struct of_device_id *of_id; ++ struct spacemit_pmic *pmic = dev_get_drvdata(pdev->dev.parent); ++ ++ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info)); ++ if (!indio_dev) ++ return -ENOMEM; ++ ++ info = iio_priv(indio_dev); ++ info->irq = platform_get_irq(pdev, 0); ++ if (info->irq < 0) { ++ dev_err(&pdev->dev, "get irq failed\n"); ++ return info->irq; ++ } ++ ++ ret = devm_request_any_context_irq(&pdev->dev, info->irq, ++ adc_complete_irq, IRQF_TRIGGER_NONE | IRQF_ONESHOT, ++ "p1-adc", indio_dev); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Can't register adc irq: %d\n", ret); ++ return ret; ++ } ++ ++ info->regmap = pmic->regmap; ++ ++ mutex_init(&info->lock); ++ init_completion(&info->completion); ++ ++ of_id = of_match_device(spm_p1_adc_id_table, &pdev->dev); ++ if (!of_id) { ++ dev_err(&pdev->dev, "Unable to match OF ID\n"); ++ return -ENODEV; ++ } ++ ++ /* adc init */ ++ spm_p1_adc_init(indio_dev); ++ ++ match_data = (struct adc_match_data *)of_id->data; ++ ++ indio_dev->name = pdev->name; ++ indio_dev->channels = match_data->iio_desc; ++ indio_dev->num_channels = match_data->nr_desc; ++ indio_dev->info = &spm_p1_adc_iio_info; ++ indio_dev->modes = INDIO_DIRECT_MODE; ++ ++ ret = devm_iio_map_array_register(&pdev->dev, indio_dev, NULL); ++ if (ret < 0) ++ return ret; ++ ++ return devm_iio_device_register(&pdev->dev, indio_dev); ++} ++ ++static struct platform_driver spm_p1_adc_driver = { ++ .probe = spm_p1_adc_probe, ++ .driver = { ++ .name = "spm-p1-adc", ++ .of_match_table = of_match_ptr(spm_p1_adc_id_table), ++ }, ++}; ++module_platform_driver(spm_p1_adc_driver); ++ ++MODULE_DESCRIPTION("Spacemit p1 adc driver"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/th1520-adc.c b/drivers/iio/adc/th1520-adc.c new file mode 100644 index 000000000000..ca65ed112b37 @@ -350756,11 +377542,304 @@ index 000000000000..8a4f21e1cb17 + /* lock to protect against multiple access to the device */ + struct mutex mlock; +}; +diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig +index 6ba984d7f0b1..64fb6e48d748 100644 +--- a/drivers/input/misc/Kconfig ++++ b/drivers/input/misc/Kconfig +@@ -939,4 +939,14 @@ config INPUT_STPMIC1_ONKEY + To compile this driver as a module, choose M here: the + module will be called stpmic1_onkey. 
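The spacemit-p1-adc driver added above reports only a raw count plus a fixed scale of 0.7326 mV per LSB (3000/4095), so converting to millivolts is left to the channel's consumer. A minimal in-kernel consumer sketch follows; the consumer mapping name "p1-adc0" and the consumer device are assumptions for illustration, since the driver itself only registers its channels through devm_iio_map_array_register():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

static int p1_adc_read_mv(struct device *dev)
{
	struct iio_channel *ch;
	int raw, ret;

	/* "p1-adc0" is an assumed consumer mapping name, not defined by this patch */
	ch = devm_iio_channel_get(dev, "p1-adc0");
	if (IS_ERR(ch))
		return PTR_ERR(ch);

	ret = iio_read_channel_raw(ch, &raw);
	if (ret < 0)
		return ret;

	/* apply the scale the driver reports: 0.7326 mV per LSB, i.e. 3000/4095 */
	return raw * 3000 / 4095;
}

Calling iio_read_channel_processed() instead would apply the same scale inside the IIO core.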
+ ++config INPUT_SPACEMIT_P1_PWRKEY ++ tristate "Spacemit p1 power-key support" ++ depends on MFD_SPACEMIT_P1 ++ help ++ Support the power-key of Spacemit P1 PMICs as an input device ++ reporting power button status. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called spacemit-p1-pwrkey. ++ + endif +diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile +index 04296a4abe8e..452602d24903 100644 +--- a/drivers/input/misc/Makefile ++++ b/drivers/input/misc/Makefile +@@ -90,3 +90,4 @@ obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o + obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o + obj-$(CONFIG_INPUT_YEALINK) += yealink.o + obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR) += ideapad_slidebar.o ++obj-$(CONFIG_INPUT_SPACEMIT_P1_PWRKEY) += spacemit-p1-pwrkey.o +\ No newline at end of file +diff --git a/drivers/input/misc/spacemit-p1-pwrkey.c b/drivers/input/misc/spacemit-p1-pwrkey.c +new file mode 100644 +index 000000000000..cfc44b7901ae +--- /dev/null ++++ b/drivers/input/misc/spacemit-p1-pwrkey.c +@@ -0,0 +1,211 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * power-key driver for Spacemit P1 ++ * ++ * Copyright (c) 2023, SPACEMIT Co., Ltd ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static int report_event, fall_triggered; ++static struct notifier_block pm_notify; ++static spinlock_t pm_lock; ++ ++static irqreturn_t pwrkey_fall_irq(int irq, void *_pwr) ++{ ++ unsigned long flags; ++ struct input_dev *pwr = _pwr; ++ ++ spin_lock_irqsave(&pm_lock, flags); ++ if (report_event) { ++ input_report_key(pwr, KEY_POWER, 1); ++ input_sync(pwr); ++ fall_triggered = 1; ++ } ++ ++ pm_wakeup_event(pwr->dev.parent, 0); ++ ++ spin_unlock_irqrestore(&pm_lock, flags); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t pwrkey_rise_irq(int irq, void *_pwr) ++{ ++ unsigned long flags; ++ struct input_dev *pwr = _pwr; ++ ++ spin_lock_irqsave(&pm_lock, flags); ++ /* report key up if key down has been reported */ ++ if (fall_triggered) { ++ input_report_key(pwr, KEY_POWER, 0); ++ input_sync(pwr); ++ fall_triggered = 0; ++ } ++ ++ pm_wakeup_event(pwr->dev.parent, 0); ++ ++ spin_unlock_irqrestore(&pm_lock, flags); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t pwrkey_skey_irq(int irq, void *_pwr) ++{ ++ /* do nothing by now */ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t pwrkey_lkey_irq(int irq, void *_pwr) ++{ ++ /* do nothing by now */ ++ return IRQ_HANDLED; ++} ++ ++static int pwrk_pm_notify(struct notifier_block *notify_block, ++ unsigned long mode, void *unused) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&pm_lock, flags); ++ ++ switch (mode) { ++ case PM_SUSPEND_PREPARE: ++ /* don't report power-key when enter suspend */ ++ report_event = 0; ++ break; ++ ++ case PM_POST_SUSPEND: ++ /* restore report power-key */ ++ report_event = 1; ++ break; ++ } ++ ++ spin_unlock_irqrestore(&pm_lock, flags); ++ ++ return 0; ++} ++ ++static int spm_p1_pwrkey_probe(struct platform_device *pdev) ++{ ++ int err; ++ struct input_dev *pwr; ++ int rise_irq, fall_irq, s_key_irq, l_key_irq; ++ ++ report_event = 1; ++ fall_triggered = 0; ++ ++ pwr = devm_input_allocate_device(&pdev->dev); ++ if (!pwr) { ++ dev_err(&pdev->dev, "Can't allocate power button\n"); ++ return -ENOMEM; ++ } ++ ++ pwr->name = "spm_p1 pwrkey"; ++ pwr->phys = "spm_p1_pwrkey/input0"; ++ pwr->id.bustype = BUS_HOST; ++ input_set_capability(pwr, EV_KEY, KEY_POWER); ++ ++ rise_irq = 
platform_get_irq(pdev, 0); ++ if (rise_irq < 0) ++ return rise_irq; ++ ++ fall_irq = platform_get_irq(pdev, 1); ++ if (fall_irq < 0) ++ return fall_irq; ++ ++ s_key_irq = platform_get_irq(pdev, 2); ++ if (s_key_irq < 0) ++ return s_key_irq; ++ ++ l_key_irq = platform_get_irq(pdev, 3); ++ if (l_key_irq < 0) ++ return l_key_irq; ++ ++ err = devm_request_any_context_irq(&pwr->dev, rise_irq, ++ pwrkey_rise_irq, ++ IRQF_TRIGGER_NONE | IRQF_ONESHOT, ++ "spm_p1_pwrkey_rise", pwr); ++ if (err < 0) { ++ dev_err(&pdev->dev, "Can't register rise irq: %d\n", err); ++ return err; ++ } ++ ++ err = devm_request_any_context_irq(&pwr->dev, fall_irq, ++ pwrkey_fall_irq, ++ IRQF_TRIGGER_NONE | IRQF_ONESHOT, ++ "spm_p1_pwrkey_fall", pwr); ++ if (err < 0) { ++ dev_err(&pdev->dev, "Can't register fall irq: %d\n", err); ++ return err; ++ } ++ ++ err = devm_request_any_context_irq(&pwr->dev, s_key_irq, ++ pwrkey_skey_irq, ++ IRQF_TRIGGER_NONE | IRQF_ONESHOT, ++ "spm_p1_pwrkey_skey", pwr); ++ if (err < 0) { ++ dev_err(&pdev->dev, "Can't register skey irq: %d\n", err); ++ return err; ++ } ++ ++ err = devm_request_any_context_irq(&pwr->dev, l_key_irq, ++ pwrkey_lkey_irq, ++ IRQF_TRIGGER_NONE | IRQF_ONESHOT, ++ "spm_p1_pwrkey_lkey", pwr); ++ if (err < 0) { ++ dev_err(&pdev->dev, "Can't register lkey irq: %d\n", err); ++ return err; ++ } ++ ++ err = input_register_device(pwr); ++ if (err) { ++ dev_err(&pdev->dev, "Can't register power button: %d\n", err); ++ return err; ++ } ++ ++ platform_set_drvdata(pdev, pwr); ++ dev_pm_set_wake_irq(&pdev->dev, fall_irq); ++ device_init_wakeup(&pdev->dev, true); ++ ++ spin_lock_init(&pm_lock); ++ ++ pm_notify.notifier_call = pwrk_pm_notify; ++ err = register_pm_notifier(&pm_notify); ++ if (err) { ++ dev_err(&pdev->dev, "Register pm notifier failed\n"); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id spm_p1_pwrkey_id_table[] = { ++ { .compatible = "spacemit,p1,pwrkey", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, spm_p1_pwrkey_id_table); ++ ++static struct platform_driver spm_p1_pwrkey_driver = { ++ .probe = spm_p1_pwrkey_probe, ++ .driver = { ++ .name = "spm-p1-pwrkey", ++ .of_match_table = of_match_ptr(spm_p1_pwrkey_id_table), ++ }, ++}; ++module_platform_driver(spm_p1_pwrkey_driver); ++ ++MODULE_DESCRIPTION("Spacemit P1 Power Key driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig +index 56eafa478c34..9b2fa5a6ae29 100644 +--- a/drivers/iommu/Kconfig ++++ b/drivers/iommu/Kconfig +@@ -196,6 +196,7 @@ source "drivers/iommu/amd/Kconfig" + source "drivers/iommu/intel/Kconfig" + source "drivers/iommu/iommufd/Kconfig" + source "drivers/iommu/sw64/Kconfig" ++source "drivers/iommu/riscv/Kconfig" + + config IRQ_REMAP + bool "Support for Interrupt Remapping" +diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile +index e3ecb2040808..6fb28db17118 100644 +--- a/drivers/iommu/Makefile ++++ b/drivers/iommu/Makefile +@@ -1,5 +1,5 @@ + # SPDX-License-Identifier: GPL-2.0 +-obj-y += amd/ intel/ arm/ iommufd/ sw64/ ++obj-y += amd/ intel/ arm/ iommufd/ sw64/ riscv/ + obj-$(CONFIG_IOMMU_API) += iommu.o + obj-$(CONFIG_IOMMU_API) += iommu-traces.o + obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o +diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c +index ef3ee95706da..eb1e62cd499a 100644 +--- a/drivers/iommu/apple-dart.c ++++ b/drivers/iommu/apple-dart.c +@@ -779,7 +779,8 @@ static void apple_dart_domain_free(struct iommu_domain *domain) + kfree(dart_domain); + } + +-static int apple_dart_of_xlate(struct device *dev, 
struct of_phandle_args *args) ++static int apple_dart_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) + { + struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); + struct platform_device *iommu_pdev = of_find_device_by_node(args->np); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c -index 6ef522ce745b..d16d701a4447 100644 +index 1569090b1b12..1fdc413e2aa2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c -@@ -4320,7 +4320,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) +@@ -3912,7 +3912,8 @@ static int arm_smmu_clear_dirty_log(struct iommu_domain *domain, + } + #endif + +-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) ++static int arm_smmu_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) + { + return iommu_fwspec_add_ids(dev, args->args, 1); + } +@@ -4459,7 +4460,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) static void arm_smmu_free_msis(void *data) { struct device *dev = data; @@ -350770,7 +377849,7 @@ index 6ef522ce745b..d16d701a4447 100644 } static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) -@@ -4377,7 +4378,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) +@@ -4516,7 +4518,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) } /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */ @@ -350779,11 +377858,3612 @@ index 6ef522ce745b..d16d701a4447 100644 if (ret) { dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n"); return; +diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c +index 4598ac7aee81..b101318246d6 100644 +--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c ++++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c +@@ -1565,7 +1565,8 @@ static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain, + return ret; + } + +-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) ++static int arm_smmu_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) + { + u32 mask, fwid = 0; + +diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c +index 33f3c870086c..52f996ef9ca4 100644 +--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c ++++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c +@@ -546,7 +546,8 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev) + return &qcom_iommu->iommu; + } + +-static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) ++static int qcom_iommu_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) + { + struct qcom_iommu_dev *qcom_iommu; + struct platform_device *iommu_pdev; +diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c +index 2c6e9094f1e9..d98c9161948a 100644 +--- a/drivers/iommu/exynos-iommu.c ++++ b/drivers/iommu/exynos-iommu.c +@@ -1431,7 +1431,7 @@ static void exynos_iommu_release_device(struct device *dev) + } + + static int exynos_iommu_of_xlate(struct device *dev, +- struct of_phandle_args *spec) ++ const struct of_phandle_args *spec) + { + struct platform_device *sysmmu = of_find_device_by_node(spec->np); + struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); +diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c +index e14d496a9cbd..2bb40c1d7843 100644 +--- a/drivers/iommu/intel/dmar.c ++++ 
b/drivers/iommu/intel/dmar.c +@@ -32,6 +32,7 @@ + + #include "iommu.h" + #include "../irq_remapping.h" ++#include "../iommu-pages.h" + #include "perf.h" + #include "trace.h" + #include "perfmon.h" +@@ -1242,7 +1243,7 @@ static void free_iommu(struct intel_iommu *iommu) + } + + if (iommu->qi) { +- free_page((unsigned long)iommu->qi->desc); ++ iommu_free_page(iommu->qi->desc); + kfree(iommu->qi->desc_status); + kfree(iommu->qi); + } +@@ -1794,7 +1795,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu) + int dmar_enable_qi(struct intel_iommu *iommu) + { + struct q_inval *qi; +- struct page *desc_page; ++ void *desc; ++ int order; + + if (!ecap_qis(iommu->ecap)) + return -ENOENT; +@@ -1815,19 +1817,19 @@ int dmar_enable_qi(struct intel_iommu *iommu) + * Need two pages to accommodate 256 descriptors of 256 bits each + * if the remapping hardware supports scalable mode translation. + */ +- desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, +- !!ecap_smts(iommu->ecap)); +- if (!desc_page) { ++ order = ecap_smts(iommu->ecap) ? 1 : 0; ++ desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order); ++ if (!desc) { + kfree(qi); + iommu->qi = NULL; + return -ENOMEM; + } + +- qi->desc = page_address(desc_page); ++ qi->desc = desc; + + qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC); + if (!qi->desc_status) { +- free_page((unsigned long) qi->desc); ++ iommu_free_page(qi->desc); + kfree(qi); + iommu->qi = NULL; + return -ENOMEM; +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index bb23fc0f4832..124e2dc151f5 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -27,6 +27,7 @@ + #include "iommu.h" + #include "../dma-iommu.h" + #include "../irq_remapping.h" ++#include "../iommu-pages.h" + #include "pasid.h" + #include "cap_audit.h" + #include "perfmon.h" +@@ -223,22 +224,6 @@ static int __init intel_iommu_setup(char *str) + } + __setup("intel_iommu=", intel_iommu_setup); + +-void *alloc_pgtable_page(int node, gfp_t gfp) +-{ +- struct page *page; +- void *vaddr = NULL; +- +- page = alloc_pages_node(node, gfp | __GFP_ZERO, 0); +- if (page) +- vaddr = page_address(page); +- return vaddr; +-} +- +-void free_pgtable_page(void *vaddr) +-{ +- free_page((unsigned long)vaddr); +-} +- + static int domain_type_is_si(struct dmar_domain *domain) + { + return domain->domain.type == IOMMU_DOMAIN_IDENTITY; +@@ -470,7 +455,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, + if (!alloc) + return NULL; + +- context = alloc_pgtable_page(iommu->node, GFP_ATOMIC); ++ context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); + if (!context) + return NULL; + +@@ -644,17 +629,17 @@ static void free_context_table(struct intel_iommu *iommu) + for (i = 0; i < ROOT_ENTRY_NR; i++) { + context = iommu_context_addr(iommu, i, 0, 0); + if (context) +- free_pgtable_page(context); ++ iommu_free_page(context); + + if (!sm_supported(iommu)) + continue; + + context = iommu_context_addr(iommu, i, 0x80, 0); + if (context) +- free_pgtable_page(context); ++ iommu_free_page(context); + } + +- free_pgtable_page(iommu->root_entry); ++ iommu_free_page(iommu->root_entry); + iommu->root_entry = NULL; + } + +@@ -804,7 +789,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, + if (!dma_pte_present(pte)) { + uint64_t pteval; + +- tmp_page = alloc_pgtable_page(domain->nid, gfp); ++ tmp_page = iommu_alloc_page_node(domain->nid, gfp); + + if (!tmp_page) + return NULL; +@@ -816,7 +801,7 @@ static struct dma_pte 
*pfn_to_dma_pte(struct dmar_domain *domain, + + if (cmpxchg64(&pte->val, 0ULL, pteval)) + /* Someone else set it while we were thinking; use theirs. */ +- free_pgtable_page(tmp_page); ++ iommu_free_page(tmp_page); + else + domain_flush_cache(domain, pte, sizeof(*pte)); + } +@@ -929,7 +914,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, + last_pfn < level_pfn + level_size(level) - 1)) { + dma_clear_pte(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); +- free_pgtable_page(level_pte); ++ iommu_free_page(level_pte); + } + next: + pfn += level_size(level); +@@ -953,7 +938,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, + + /* free pgd */ + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { +- free_pgtable_page(domain->pgd); ++ iommu_free_page(domain->pgd); + domain->pgd = NULL; + } + } +@@ -1055,7 +1040,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) + { + struct root_entry *root; + +- root = alloc_pgtable_page(iommu->node, GFP_ATOMIC); ++ root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); + if (!root) { + pr_err("Allocating root entry for %s failed\n", + iommu->name); +@@ -1778,7 +1763,7 @@ static void domain_exit(struct dmar_domain *domain) + LIST_HEAD(freelist); + + domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); +- put_pages_list(&freelist); ++ iommu_put_pages_list(&freelist); + } + + if (WARN_ON(!list_empty(&domain->devices))) +@@ -2515,7 +2500,7 @@ static int copy_context_table(struct intel_iommu *iommu, + if (!old_ce) + goto out; + +- new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL); ++ new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL); + if (!new_ce) + goto out_unmap; + +@@ -3466,7 +3451,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb, + start_vpfn, mhp->nr_pages, + list_empty(&freelist), 0); + rcu_read_unlock(); +- put_pages_list(&freelist); ++ iommu_put_pages_list(&freelist); + } + break; + } +@@ -3916,7 +3901,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) + domain->max_addr = 0; + + /* always allocate the top pgd */ +- domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC); ++ domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC); + if (!domain->pgd) + return -ENOMEM; + domain_flush_cache(domain, domain->pgd, PAGE_SIZE); +@@ -4070,7 +4055,7 @@ int prepare_domain_attach_device(struct iommu_domain *domain, + pte = dmar_domain->pgd; + if (dma_pte_present(pte)) { + dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte)); +- free_pgtable_page(pte); ++ iommu_free_page(pte); + } + dmar_domain->agaw--; + } +@@ -4220,7 +4205,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain, + if (dmar_domain->nested_parent) + parent_domain_flush(dmar_domain, start_pfn, nrpages, + list_empty(&gather->freelist)); +- put_pages_list(&gather->freelist); ++ iommu_put_pages_list(&gather->freelist); + } + + static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, +diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h +index eaef932ad7c9..5307e144ca12 100644 +--- a/drivers/iommu/intel/iommu.h ++++ b/drivers/iommu/intel/iommu.h +@@ -1076,8 +1076,6 @@ void domain_update_iommu_cap(struct dmar_domain *domain); + + int dmar_ir_support(void); + +-void *alloc_pgtable_page(int node, gfp_t gfp); +-void free_pgtable_page(void *vaddr); + void iommu_flush_write_buffer(struct intel_iommu *iommu); + struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, + const struct iommu_user_data 
*user_data); +diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c +index 29b9e55dcf26..f4ddd4d9a825 100644 +--- a/drivers/iommu/intel/irq_remapping.c ++++ b/drivers/iommu/intel/irq_remapping.c +@@ -22,6 +22,7 @@ + + #include "iommu.h" + #include "../irq_remapping.h" ++#include "../iommu-pages.h" + #include "cap_audit.h" + + enum irq_mode { +@@ -527,7 +528,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) + struct ir_table *ir_table; + struct fwnode_handle *fn; + unsigned long *bitmap; +- struct page *pages; ++ void *ir_table_base; + + if (iommu->ir_table) + return 0; +@@ -536,9 +537,9 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) + if (!ir_table) + return -ENOMEM; + +- pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, +- INTR_REMAP_PAGE_ORDER); +- if (!pages) { ++ ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, ++ INTR_REMAP_PAGE_ORDER); ++ if (!ir_table_base) { + pr_err("IR%d: failed to allocate pages of order %d\n", + iommu->seq_id, INTR_REMAP_PAGE_ORDER); + goto out_free_table; +@@ -573,7 +574,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) + else + iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops; + +- ir_table->base = page_address(pages); ++ ir_table->base = ir_table_base; + ir_table->bitmap = bitmap; + iommu->ir_table = ir_table; + +@@ -622,7 +623,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) + out_free_bitmap: + bitmap_free(bitmap); + out_free_pages: +- __free_pages(pages, INTR_REMAP_PAGE_ORDER); ++ iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER); + out_free_table: + kfree(ir_table); + +@@ -643,8 +644,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu) + irq_domain_free_fwnode(fn); + iommu->ir_domain = NULL; + } +- free_pages((unsigned long)iommu->ir_table->base, +- INTR_REMAP_PAGE_ORDER); ++ iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER); + bitmap_free(iommu->ir_table->bitmap); + kfree(iommu->ir_table); + iommu->ir_table = NULL; +diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c +index a51e895d9a17..6ef582bfaea5 100644 +--- a/drivers/iommu/intel/pasid.c ++++ b/drivers/iommu/intel/pasid.c +@@ -20,6 +20,7 @@ + + #include "iommu.h" + #include "pasid.h" ++#include "../iommu-pages.h" + + /* + * Intel IOMMU system wide PASID name space: +@@ -38,7 +39,7 @@ int intel_pasid_alloc_table(struct device *dev) + { + struct device_domain_info *info; + struct pasid_table *pasid_table; +- struct page *pages; ++ struct pasid_dir_entry *dir; + u32 max_pasid = 0; + int order, size; + +@@ -59,14 +60,13 @@ int intel_pasid_alloc_table(struct device *dev) + + size = max_pasid >> (PASID_PDE_SHIFT - 3); + order = size ? 
get_order(size) : 0; +- pages = alloc_pages_node(info->iommu->node, +- GFP_KERNEL | __GFP_ZERO, order); +- if (!pages) { ++ dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order); ++ if (!dir) { + kfree(pasid_table); + return -ENOMEM; + } + +- pasid_table->table = page_address(pages); ++ pasid_table->table = dir; + pasid_table->order = order; + pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3); + info->pasid_table = pasid_table; +@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev) + max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT; + for (i = 0; i < max_pde; i++) { + table = get_pasid_table_from_pde(&dir[i]); +- free_pgtable_page(table); ++ iommu_free_page(table); + } + +- free_pages((unsigned long)pasid_table->table, pasid_table->order); ++ iommu_free_pages(pasid_table->table, pasid_table->order); + kfree(pasid_table); + } + +@@ -146,7 +146,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid) + retry: + entries = get_pasid_table_from_pde(&dir[dir_index]); + if (!entries) { +- entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC); ++ entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC); + if (!entries) + return NULL; + +@@ -158,7 +158,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid) + */ + if (cmpxchg64(&dir[dir_index].val, 0ULL, + (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) { +- free_pgtable_page(entries); ++ iommu_free_page(entries); + goto retry; + } + if (!ecap_coherent(info->iommu->ecap)) { +diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c +index 4cc6c814f80e..8d7b8170f5f3 100644 +--- a/drivers/iommu/intel/svm.c ++++ b/drivers/iommu/intel/svm.c +@@ -22,6 +22,7 @@ + #include "iommu.h" + #include "pasid.h" + #include "perf.h" ++#include "../iommu-pages.h" + #include "trace.h" + + static irqreturn_t prq_event_thread(int irq, void *d); +@@ -63,16 +64,14 @@ svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev) + int intel_svm_enable_prq(struct intel_iommu *iommu) + { + struct iopf_queue *iopfq; +- struct page *pages; + int irq, ret; + +- pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER); +- if (!pages) { ++ iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER); ++ if (!iommu->prq) { + pr_warn("IOMMU: %s: Failed to allocate page request queue\n", + iommu->name); + return -ENOMEM; + } +- iommu->prq = page_address(pages); + + irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); + if (irq <= 0) { +@@ -117,7 +116,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu) + dmar_free_hwirq(irq); + iommu->pr_irq = 0; + free_prq: +- free_pages((unsigned long)iommu->prq, PRQ_ORDER); ++ iommu_free_pages(iommu->prq, PRQ_ORDER); + iommu->prq = NULL; + + return ret; +@@ -140,7 +139,7 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) + iommu->iopf_queue = NULL; + } + +- free_pages((unsigned long)iommu->prq, PRQ_ORDER); ++ iommu_free_pages(iommu->prq, PRQ_ORDER); + iommu->prq = NULL; + + return 0; +diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h +new file mode 100644 +index 000000000000..5a222d0ad25c +--- /dev/null ++++ b/drivers/iommu/iommu-pages.h +@@ -0,0 +1,154 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (c) 2024, Google LLC. 
++ * Pasha Tatashin ++ */ ++ ++#ifndef __IOMMU_PAGES_H ++#define __IOMMU_PAGES_H ++ ++#include ++#include ++#include ++ ++/* ++ * All page allocations that should be reported to as "iommu-pagetables" to ++ * userspace must use one of the functions below. This includes allocations of ++ * page-tables and other per-iommu_domain configuration structures. ++ * ++ * This is necessary for the proper accounting as IOMMU state can be rather ++ * large, i.e. multiple gigabytes in size. ++ */ ++ ++/** ++ * __iommu_alloc_pages - allocate a zeroed page of a given order. ++ * @gfp: buddy allocator flags ++ * @order: page order ++ * ++ * returns the head struct page of the allocated page. ++ */ ++static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order) ++{ ++ struct page *page; ++ ++ page = alloc_pages(gfp | __GFP_ZERO, order); ++ if (unlikely(!page)) ++ return NULL; ++ ++ return page; ++} ++ ++/** ++ * __iommu_free_pages - free page of a given order ++ * @page: head struct page of the page ++ * @order: page order ++ */ ++static inline void __iommu_free_pages(struct page *page, int order) ++{ ++ if (!page) ++ return; ++ ++ __free_pages(page, order); ++} ++ ++/** ++ * iommu_alloc_pages_node - allocate a zeroed page of a given order from ++ * specific NUMA node. ++ * @nid: memory NUMA node id ++ * @gfp: buddy allocator flags ++ * @order: page order ++ * ++ * returns the virtual address of the allocated page ++ */ ++static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order) ++{ ++ struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order); ++ ++ if (unlikely(!page)) ++ return NULL; ++ ++ return page_address(page); ++} ++ ++/** ++ * iommu_alloc_pages - allocate a zeroed page of a given order ++ * @gfp: buddy allocator flags ++ * @order: page order ++ * ++ * returns the virtual address of the allocated page ++ */ ++static inline void *iommu_alloc_pages(gfp_t gfp, int order) ++{ ++ struct page *page = __iommu_alloc_pages(gfp, order); ++ ++ if (unlikely(!page)) ++ return NULL; ++ ++ return page_address(page); ++} ++ ++/** ++ * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node. ++ * @nid: memory NUMA node id ++ * @gfp: buddy allocator flags ++ * ++ * returns the virtual address of the allocated page ++ */ ++static inline void *iommu_alloc_page_node(int nid, gfp_t gfp) ++{ ++ return iommu_alloc_pages_node(nid, gfp, 0); ++} ++ ++/** ++ * iommu_alloc_page - allocate a zeroed page ++ * @gfp: buddy allocator flags ++ * ++ * returns the virtual address of the allocated page ++ */ ++static inline void *iommu_alloc_page(gfp_t gfp) ++{ ++ return iommu_alloc_pages(gfp, 0); ++} ++ ++/** ++ * iommu_free_pages - free page of a given order ++ * @virt: virtual address of the page to be freed. ++ * @order: page order ++ */ ++static inline void iommu_free_pages(void *virt, int order) ++{ ++ if (!virt) ++ return; ++ ++ __iommu_free_pages(virt_to_page(virt), order); ++} ++ ++/** ++ * iommu_free_page - free page ++ * @virt: virtual address of the page to be freed. ++ */ ++static inline void iommu_free_page(void *virt) ++{ ++ iommu_free_pages(virt, 0); ++} ++ ++/** ++ * iommu_put_pages_list - free a list of pages. ++ * @page: the head of the lru list to be freed. ++ * ++ * There are no locking requirement for these pages, as they are going to be ++ * put on a free list as soon as refcount reaches 0. Pages are put on this LRU ++ * list once they are removed from the IOMMU page tables. However, they can ++ * still be access through debugfs. 
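++ *
++ * A minimal usage sketch, assuming the caller gathers each page-table page it
++ * unlinks onto a local list (as intel_iommu_tlb_sync() above does with
++ * gather->freelist) and then releases them in one call:
++ *
++ *	LIST_HEAD(freelist);
++ *	list_add_tail(&page->lru, &freelist);
++ *	iommu_put_pages_list(&freelist);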
++ */ ++static inline void iommu_put_pages_list(struct list_head *page) ++{ ++ while (!list_empty(page)) { ++ struct page *p = list_entry(page->prev, struct page, lru); ++ ++ list_del(&p->lru); ++ put_page(p); ++ } ++} ++ ++#endif /* __IOMMU_PAGES_H */ +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index e9f9e8a23006..94b926bbac67 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -3090,7 +3090,7 @@ void iommu_fwspec_free(struct device *dev) + } + EXPORT_SYMBOL_GPL(iommu_fwspec_free); + +-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) ++int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids) + { + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + int i, new_num; +diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c +index ace1fc4bd34b..cd7219319c8b 100644 +--- a/drivers/iommu/ipmmu-vmsa.c ++++ b/drivers/iommu/ipmmu-vmsa.c +@@ -709,7 +709,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, + } + + static int ipmmu_init_platform_device(struct device *dev, +- struct of_phandle_args *args) ++ const struct of_phandle_args *args) + { + struct platform_device *ipmmu_pdev; + +@@ -773,7 +773,7 @@ static bool ipmmu_device_is_allowed(struct device *dev) + } + + static int ipmmu_of_xlate(struct device *dev, +- struct of_phandle_args *spec) ++ const struct of_phandle_args *spec) + { + if (!ipmmu_device_is_allowed(dev)) + return -ENODEV; +diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c +index f86af9815d6f..989e0869d805 100644 +--- a/drivers/iommu/msm_iommu.c ++++ b/drivers/iommu/msm_iommu.c +@@ -598,7 +598,7 @@ static void print_ctx_regs(void __iomem *base, int ctx) + + static int insert_iommu_master(struct device *dev, + struct msm_iommu_dev **iommu, +- struct of_phandle_args *spec) ++ const struct of_phandle_args *spec) + { + struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev); + int sid; +@@ -626,7 +626,7 @@ static int insert_iommu_master(struct device *dev, + } + + static int qcom_iommu_of_xlate(struct device *dev, +- struct of_phandle_args *spec) ++ const struct of_phandle_args *spec) + { + struct msm_iommu_dev *iommu = NULL, *iter; + unsigned long flags; +diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c +index 51d0eba8cbdf..358e8ee9506c 100644 +--- a/drivers/iommu/mtk_iommu.c ++++ b/drivers/iommu/mtk_iommu.c +@@ -957,7 +957,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev) + return group; + } + +-static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) ++static int mtk_iommu_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) + { + struct platform_device *m4updev; + +diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c +index 32cc8341d372..0ddcd153b568 100644 +--- a/drivers/iommu/mtk_iommu_v1.c ++++ b/drivers/iommu/mtk_iommu_v1.c +@@ -398,7 +398,8 @@ static const struct iommu_ops mtk_iommu_v1_ops; + * MTK generation one iommu HW only support one iommu domain, and all the client + * sharing the same iova address space. 
+ */ +-static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args) ++static int mtk_iommu_v1_create_mapping(struct device *dev, ++ const struct of_phandle_args *args) + { + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct mtk_iommu_v1_data *data; +diff --git a/drivers/iommu/riscv/Kconfig b/drivers/iommu/riscv/Kconfig +new file mode 100644 +index 000000000000..c071816f59a6 +--- /dev/null ++++ b/drivers/iommu/riscv/Kconfig +@@ -0,0 +1,20 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++# RISC-V IOMMU support ++ ++config RISCV_IOMMU ++ bool "RISC-V IOMMU Support" ++ depends on RISCV && 64BIT ++ default y ++ select IOMMU_API ++ help ++ Support for implementations of the RISC-V IOMMU architecture that ++ complements the RISC-V MMU capabilities, providing similar address ++ translation and protection functions for accesses from I/O devices. ++ ++ Say Y here if your SoC includes an IOMMU device implementing ++ the RISC-V IOMMU architecture. ++ ++config RISCV_IOMMU_PCI ++ def_bool y if RISCV_IOMMU && PCI_MSI ++ help ++ Support for the PCIe implementation of RISC-V IOMMU architecture. +diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile +new file mode 100644 +index 000000000000..f54c9ed17d41 +--- /dev/null ++++ b/drivers/iommu/riscv/Makefile +@@ -0,0 +1,3 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o ++obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o +diff --git a/drivers/iommu/riscv/iommu-bits.h b/drivers/iommu/riscv/iommu-bits.h +new file mode 100644 +index 000000000000..98daf0e1a306 +--- /dev/null ++++ b/drivers/iommu/riscv/iommu-bits.h +@@ -0,0 +1,784 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright © 2022-2024 Rivos Inc. ++ * Copyright © 2023 FORTH-ICS/CARV ++ * Copyright © 2023 RISC-V IOMMU Task Group ++ * ++ * RISC-V IOMMU - Register Layout and Data Structures. 
++ * ++ * Based on the 'RISC-V IOMMU Architecture Specification', Version 1.0 ++ * Published at https://github.com/riscv-non-isa/riscv-iommu ++ * ++ */ ++ ++#ifndef _RISCV_IOMMU_BITS_H_ ++#define _RISCV_IOMMU_BITS_H_ ++ ++#include ++#include ++#include ++ ++/* ++ * Chapter 5: Memory Mapped register interface ++ */ ++ ++/* Common field positions */ ++#define RISCV_IOMMU_PPN_FIELD GENMASK_ULL(53, 10) ++#define RISCV_IOMMU_QUEUE_LOG2SZ_FIELD GENMASK_ULL(4, 0) ++#define RISCV_IOMMU_QUEUE_INDEX_FIELD GENMASK_ULL(31, 0) ++#define RISCV_IOMMU_QUEUE_ENABLE BIT(0) ++#define RISCV_IOMMU_QUEUE_INTR_ENABLE BIT(1) ++#define RISCV_IOMMU_QUEUE_MEM_FAULT BIT(8) ++#define RISCV_IOMMU_QUEUE_OVERFLOW BIT(9) ++#define RISCV_IOMMU_QUEUE_ACTIVE BIT(16) ++#define RISCV_IOMMU_QUEUE_BUSY BIT(17) ++ ++#define RISCV_IOMMU_ATP_PPN_FIELD GENMASK_ULL(43, 0) ++#define RISCV_IOMMU_ATP_MODE_FIELD GENMASK_ULL(63, 60) ++ ++/* 5.3 IOMMU Capabilities (64bits) */ ++#define RISCV_IOMMU_REG_CAPABILITIES 0x0000 ++#define RISCV_IOMMU_CAPABILITIES_VERSION GENMASK_ULL(7, 0) ++#define RISCV_IOMMU_CAPABILITIES_SV32 BIT_ULL(8) ++#define RISCV_IOMMU_CAPABILITIES_SV39 BIT_ULL(9) ++#define RISCV_IOMMU_CAPABILITIES_SV48 BIT_ULL(10) ++#define RISCV_IOMMU_CAPABILITIES_SV57 BIT_ULL(11) ++#define RISCV_IOMMU_CAPABILITIES_SVPBMT BIT_ULL(15) ++#define RISCV_IOMMU_CAPABILITIES_SV32X4 BIT_ULL(16) ++#define RISCV_IOMMU_CAPABILITIES_SV39X4 BIT_ULL(17) ++#define RISCV_IOMMU_CAPABILITIES_SV48X4 BIT_ULL(18) ++#define RISCV_IOMMU_CAPABILITIES_SV57X4 BIT_ULL(19) ++#define RISCV_IOMMU_CAPABILITIES_AMO_MRIF BIT_ULL(21) ++#define RISCV_IOMMU_CAPABILITIES_MSI_FLAT BIT_ULL(22) ++#define RISCV_IOMMU_CAPABILITIES_MSI_MRIF BIT_ULL(23) ++#define RISCV_IOMMU_CAPABILITIES_AMO_HWAD BIT_ULL(24) ++#define RISCV_IOMMU_CAPABILITIES_ATS BIT_ULL(25) ++#define RISCV_IOMMU_CAPABILITIES_T2GPA BIT_ULL(26) ++#define RISCV_IOMMU_CAPABILITIES_END BIT_ULL(27) ++#define RISCV_IOMMU_CAPABILITIES_IGS GENMASK_ULL(29, 28) ++#define RISCV_IOMMU_CAPABILITIES_HPM BIT_ULL(30) ++#define RISCV_IOMMU_CAPABILITIES_DBG BIT_ULL(31) ++#define RISCV_IOMMU_CAPABILITIES_PAS GENMASK_ULL(37, 32) ++#define RISCV_IOMMU_CAPABILITIES_PD8 BIT_ULL(38) ++#define RISCV_IOMMU_CAPABILITIES_PD17 BIT_ULL(39) ++#define RISCV_IOMMU_CAPABILITIES_PD20 BIT_ULL(40) ++ ++/** ++ * enum riscv_iommu_igs_settings - Interrupt Generation Support Settings ++ * @RISCV_IOMMU_CAPABILITIES_IGS_MSI: IOMMU supports only MSI generation ++ * @RISCV_IOMMU_CAPABILITIES_IGS_WSI: IOMMU supports only Wired-Signaled interrupt ++ * @RISCV_IOMMU_CAPABILITIES_IGS_BOTH: IOMMU supports both MSI and WSI generation ++ * @RISCV_IOMMU_CAPABILITIES_IGS_RSRV: Reserved for standard use ++ */ ++enum riscv_iommu_igs_settings { ++ RISCV_IOMMU_CAPABILITIES_IGS_MSI = 0, ++ RISCV_IOMMU_CAPABILITIES_IGS_WSI = 1, ++ RISCV_IOMMU_CAPABILITIES_IGS_BOTH = 2, ++ RISCV_IOMMU_CAPABILITIES_IGS_RSRV = 3 ++}; ++ ++/* 5.4 Features control register (32bits) */ ++#define RISCV_IOMMU_REG_FCTL 0x0008 ++#define RISCV_IOMMU_FCTL_BE BIT(0) ++#define RISCV_IOMMU_FCTL_WSI BIT(1) ++#define RISCV_IOMMU_FCTL_GXL BIT(2) ++ ++/* 5.5 Device-directory-table pointer (64bits) */ ++#define RISCV_IOMMU_REG_DDTP 0x0010 ++#define RISCV_IOMMU_DDTP_IOMMU_MODE GENMASK_ULL(3, 0) ++#define RISCV_IOMMU_DDTP_BUSY BIT_ULL(4) ++#define RISCV_IOMMU_DDTP_PPN RISCV_IOMMU_PPN_FIELD ++ ++/** ++ * enum riscv_iommu_ddtp_modes - IOMMU translation modes ++ * @RISCV_IOMMU_DDTP_IOMMU_MODE_OFF: No inbound transactions allowed ++ * @RISCV_IOMMU_DDTP_IOMMU_MODE_BARE: Pass-through mode ++ * 
@RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL: One-level DDT ++ * @RISCV_IOMMU_DDTP_IOMMU_MODE_2LVL: Two-level DDT ++ * @RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL: Three-level DDT ++ * @RISCV_IOMMU_DDTP_IOMMU_MODE_MAX: Max value allowed by specification ++ */ ++enum riscv_iommu_ddtp_modes { ++ RISCV_IOMMU_DDTP_IOMMU_MODE_OFF = 0, ++ RISCV_IOMMU_DDTP_IOMMU_MODE_BARE = 1, ++ RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL = 2, ++ RISCV_IOMMU_DDTP_IOMMU_MODE_2LVL = 3, ++ RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL = 4, ++ RISCV_IOMMU_DDTP_IOMMU_MODE_MAX = 4 ++}; ++ ++/* 5.6 Command Queue Base (64bits) */ ++#define RISCV_IOMMU_REG_CQB 0x0018 ++#define RISCV_IOMMU_CQB_ENTRIES RISCV_IOMMU_QUEUE_LOG2SZ_FIELD ++#define RISCV_IOMMU_CQB_PPN RISCV_IOMMU_PPN_FIELD ++ ++/* 5.7 Command Queue head (32bits) */ ++#define RISCV_IOMMU_REG_CQH 0x0020 ++#define RISCV_IOMMU_CQH_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD ++ ++/* 5.8 Command Queue tail (32bits) */ ++#define RISCV_IOMMU_REG_CQT 0x0024 ++#define RISCV_IOMMU_CQT_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD ++ ++/* 5.9 Fault Queue Base (64bits) */ ++#define RISCV_IOMMU_REG_FQB 0x0028 ++#define RISCV_IOMMU_FQB_ENTRIES RISCV_IOMMU_QUEUE_LOG2SZ_FIELD ++#define RISCV_IOMMU_FQB_PPN RISCV_IOMMU_PPN_FIELD ++ ++/* 5.10 Fault Queue Head (32bits) */ ++#define RISCV_IOMMU_REG_FQH 0x0030 ++#define RISCV_IOMMU_FQH_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD ++ ++/* 5.11 Fault Queue tail (32bits) */ ++#define RISCV_IOMMU_REG_FQT 0x0034 ++#define RISCV_IOMMU_FQT_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD ++ ++/* 5.12 Page Request Queue base (64bits) */ ++#define RISCV_IOMMU_REG_PQB 0x0038 ++#define RISCV_IOMMU_PQB_ENTRIES RISCV_IOMMU_QUEUE_LOG2SZ_FIELD ++#define RISCV_IOMMU_PQB_PPN RISCV_IOMMU_PPN_FIELD ++ ++/* 5.13 Page Request Queue head (32bits) */ ++#define RISCV_IOMMU_REG_PQH 0x0040 ++#define RISCV_IOMMU_PQH_INDEX RISCV_IOMMU_QUEUE_INDEX_FIELD ++ ++/* 5.14 Page Request Queue tail (32bits) */ ++#define RISCV_IOMMU_REG_PQT 0x0044 ++#define RISCV_IOMMU_PQT_INDEX_MASK RISCV_IOMMU_QUEUE_INDEX_FIELD ++ ++/* 5.15 Command Queue CSR (32bits) */ ++#define RISCV_IOMMU_REG_CQCSR 0x0048 ++#define RISCV_IOMMU_CQCSR_CQEN RISCV_IOMMU_QUEUE_ENABLE ++#define RISCV_IOMMU_CQCSR_CIE RISCV_IOMMU_QUEUE_INTR_ENABLE ++#define RISCV_IOMMU_CQCSR_CQMF RISCV_IOMMU_QUEUE_MEM_FAULT ++#define RISCV_IOMMU_CQCSR_CMD_TO BIT(9) ++#define RISCV_IOMMU_CQCSR_CMD_ILL BIT(10) ++#define RISCV_IOMMU_CQCSR_FENCE_W_IP BIT(11) ++#define RISCV_IOMMU_CQCSR_CQON RISCV_IOMMU_QUEUE_ACTIVE ++#define RISCV_IOMMU_CQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY ++ ++/* 5.16 Fault Queue CSR (32bits) */ ++#define RISCV_IOMMU_REG_FQCSR 0x004C ++#define RISCV_IOMMU_FQCSR_FQEN RISCV_IOMMU_QUEUE_ENABLE ++#define RISCV_IOMMU_FQCSR_FIE RISCV_IOMMU_QUEUE_INTR_ENABLE ++#define RISCV_IOMMU_FQCSR_FQMF RISCV_IOMMU_QUEUE_MEM_FAULT ++#define RISCV_IOMMU_FQCSR_FQOF RISCV_IOMMU_QUEUE_OVERFLOW ++#define RISCV_IOMMU_FQCSR_FQON RISCV_IOMMU_QUEUE_ACTIVE ++#define RISCV_IOMMU_FQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY ++ ++/* 5.17 Page Request Queue CSR (32bits) */ ++#define RISCV_IOMMU_REG_PQCSR 0x0050 ++#define RISCV_IOMMU_PQCSR_PQEN RISCV_IOMMU_QUEUE_ENABLE ++#define RISCV_IOMMU_PQCSR_PIE RISCV_IOMMU_QUEUE_INTR_ENABLE ++#define RISCV_IOMMU_PQCSR_PQMF RISCV_IOMMU_QUEUE_MEM_FAULT ++#define RISCV_IOMMU_PQCSR_PQOF RISCV_IOMMU_QUEUE_OVERFLOW ++#define RISCV_IOMMU_PQCSR_PQON RISCV_IOMMU_QUEUE_ACTIVE ++#define RISCV_IOMMU_PQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY ++ ++/* 5.18 Interrupt Pending Status (32bits) */ ++#define RISCV_IOMMU_REG_IPSR 0x0054 ++ ++#define RISCV_IOMMU_INTR_CQ 0 ++#define RISCV_IOMMU_INTR_FQ 1 ++#define RISCV_IOMMU_INTR_PM 2 
++#define RISCV_IOMMU_INTR_PQ 3 ++#define RISCV_IOMMU_INTR_COUNT 4 ++ ++#define RISCV_IOMMU_IPSR_CIP BIT(RISCV_IOMMU_INTR_CQ) ++#define RISCV_IOMMU_IPSR_FIP BIT(RISCV_IOMMU_INTR_FQ) ++#define RISCV_IOMMU_IPSR_PMIP BIT(RISCV_IOMMU_INTR_PM) ++#define RISCV_IOMMU_IPSR_PIP BIT(RISCV_IOMMU_INTR_PQ) ++ ++/* 5.19 Performance monitoring counter overflow status (32bits) */ ++#define RISCV_IOMMU_REG_IOCOUNTOVF 0x0058 ++#define RISCV_IOMMU_IOCOUNTOVF_CY BIT(0) ++#define RISCV_IOMMU_IOCOUNTOVF_HPM GENMASK_ULL(31, 1) ++ ++/* 5.20 Performance monitoring counter inhibits (32bits) */ ++#define RISCV_IOMMU_REG_IOCOUNTINH 0x005C ++#define RISCV_IOMMU_IOCOUNTINH_CY BIT(0) ++#define RISCV_IOMMU_IOCOUNTINH_HPM GENMASK(31, 1) ++ ++/* 5.21 Performance monitoring cycles counter (64bits) */ ++#define RISCV_IOMMU_REG_IOHPMCYCLES 0x0060 ++#define RISCV_IOMMU_IOHPMCYCLES_COUNTER GENMASK_ULL(62, 0) ++#define RISCV_IOMMU_IOHPMCYCLES_OF BIT_ULL(63) ++ ++/* 5.22 Performance monitoring event counters (31 * 64bits) */ ++#define RISCV_IOMMU_REG_IOHPMCTR_BASE 0x0068 ++#define RISCV_IOMMU_REG_IOHPMCTR(_n) (RISCV_IOMMU_REG_IOHPMCTR_BASE + ((_n) * 0x8)) ++ ++/* 5.23 Performance monitoring event selectors (31 * 64bits) */ ++#define RISCV_IOMMU_REG_IOHPMEVT_BASE 0x0160 ++#define RISCV_IOMMU_REG_IOHPMEVT(_n) (RISCV_IOMMU_REG_IOHPMEVT_BASE + ((_n) * 0x8)) ++#define RISCV_IOMMU_IOHPMEVT_EVENTID GENMASK_ULL(14, 0) ++#define RISCV_IOMMU_IOHPMEVT_DMASK BIT_ULL(15) ++#define RISCV_IOMMU_IOHPMEVT_PID_PSCID GENMASK_ULL(35, 16) ++#define RISCV_IOMMU_IOHPMEVT_DID_GSCID GENMASK_ULL(59, 36) ++#define RISCV_IOMMU_IOHPMEVT_PV_PSCV BIT_ULL(60) ++#define RISCV_IOMMU_IOHPMEVT_DV_GSCV BIT_ULL(61) ++#define RISCV_IOMMU_IOHPMEVT_IDT BIT_ULL(62) ++#define RISCV_IOMMU_IOHPMEVT_OF BIT_ULL(63) ++ ++/* Number of defined performance-monitoring event selectors */ ++#define RISCV_IOMMU_IOHPMEVT_CNT 31 ++ ++/** ++ * enum riscv_iommu_hpmevent_id - Performance-monitoring event identifier ++ * ++ * @RISCV_IOMMU_HPMEVENT_INVALID: Invalid event, do not count ++ * @RISCV_IOMMU_HPMEVENT_URQ: Untranslated requests ++ * @RISCV_IOMMU_HPMEVENT_TRQ: Translated requests ++ * @RISCV_IOMMU_HPMEVENT_ATS_RQ: ATS translation requests ++ * @RISCV_IOMMU_HPMEVENT_TLB_MISS: TLB misses ++ * @RISCV_IOMMU_HPMEVENT_DD_WALK: Device directory walks ++ * @RISCV_IOMMU_HPMEVENT_PD_WALK: Process directory walks ++ * @RISCV_IOMMU_HPMEVENT_S_VS_WALKS: First-stage page table walks ++ * @RISCV_IOMMU_HPMEVENT_G_WALKS: Second-stage page table walks ++ * @RISCV_IOMMU_HPMEVENT_MAX: Value to denote maximum Event IDs ++ */ ++enum riscv_iommu_hpmevent_id { ++ RISCV_IOMMU_HPMEVENT_INVALID = 0, ++ RISCV_IOMMU_HPMEVENT_URQ = 1, ++ RISCV_IOMMU_HPMEVENT_TRQ = 2, ++ RISCV_IOMMU_HPMEVENT_ATS_RQ = 3, ++ RISCV_IOMMU_HPMEVENT_TLB_MISS = 4, ++ RISCV_IOMMU_HPMEVENT_DD_WALK = 5, ++ RISCV_IOMMU_HPMEVENT_PD_WALK = 6, ++ RISCV_IOMMU_HPMEVENT_S_VS_WALKS = 7, ++ RISCV_IOMMU_HPMEVENT_G_WALKS = 8, ++ RISCV_IOMMU_HPMEVENT_MAX = 9 ++}; ++ ++/* 5.24 Translation request IOVA (64bits) */ ++#define RISCV_IOMMU_REG_TR_REQ_IOVA 0x0258 ++#define RISCV_IOMMU_TR_REQ_IOVA_VPN GENMASK_ULL(63, 12) ++ ++/* 5.25 Translation request control (64bits) */ ++#define RISCV_IOMMU_REG_TR_REQ_CTL 0x0260 ++#define RISCV_IOMMU_TR_REQ_CTL_GO_BUSY BIT_ULL(0) ++#define RISCV_IOMMU_TR_REQ_CTL_PRIV BIT_ULL(1) ++#define RISCV_IOMMU_TR_REQ_CTL_EXE BIT_ULL(2) ++#define RISCV_IOMMU_TR_REQ_CTL_NW BIT_ULL(3) ++#define RISCV_IOMMU_TR_REQ_CTL_PID GENMASK_ULL(31, 12) ++#define RISCV_IOMMU_TR_REQ_CTL_PV BIT_ULL(32) ++#define RISCV_IOMMU_TR_REQ_CTL_DID 
GENMASK_ULL(63, 40) ++ ++/* 5.26 Translation request response (64bits) */ ++#define RISCV_IOMMU_REG_TR_RESPONSE 0x0268 ++#define RISCV_IOMMU_TR_RESPONSE_FAULT BIT_ULL(0) ++#define RISCV_IOMMU_TR_RESPONSE_PBMT GENMASK_ULL(8, 7) ++#define RISCV_IOMMU_TR_RESPONSE_SZ BIT_ULL(9) ++#define RISCV_IOMMU_TR_RESPONSE_PPN RISCV_IOMMU_PPN_FIELD ++ ++/* 5.27 Interrupt cause to vector (64bits) */ ++#define RISCV_IOMMU_REG_ICVEC 0x02F8 ++#define RISCV_IOMMU_ICVEC_CIV GENMASK_ULL(3, 0) ++#define RISCV_IOMMU_ICVEC_FIV GENMASK_ULL(7, 4) ++#define RISCV_IOMMU_ICVEC_PMIV GENMASK_ULL(11, 8) ++#define RISCV_IOMMU_ICVEC_PIV GENMASK_ULL(15, 12) ++ ++/* 5.28 MSI Configuration table (32 * 64bits) */ ++#define RISCV_IOMMU_REG_MSI_CFG_TBL 0x0300 ++#define RISCV_IOMMU_REG_MSI_CFG_TBL_ADDR(_n) \ ++ (RISCV_IOMMU_REG_MSI_CFG_TBL + ((_n) * 0x10)) ++#define RISCV_IOMMU_MSI_CFG_TBL_ADDR GENMASK_ULL(55, 2) ++#define RISCV_IOMMU_REG_MSI_CFG_TBL_DATA(_n) \ ++ (RISCV_IOMMU_REG_MSI_CFG_TBL + ((_n) * 0x10) + 0x08) ++#define RISCV_IOMMU_MSI_CFG_TBL_DATA GENMASK_ULL(31, 0) ++#define RISCV_IOMMU_REG_MSI_CFG_TBL_CTRL(_n) \ ++ (RISCV_IOMMU_REG_MSI_CFG_TBL + ((_n) * 0x10) + 0x0C) ++#define RISCV_IOMMU_MSI_CFG_TBL_CTRL_M BIT_ULL(0) ++ ++#define RISCV_IOMMU_REG_SIZE 0x1000 ++ ++/* ++ * Chapter 2: Data structures ++ */ ++ ++/* ++ * Device Directory Table macros for non-leaf nodes ++ */ ++#define RISCV_IOMMU_DDTE_V BIT_ULL(0) ++#define RISCV_IOMMU_DDTE_PPN RISCV_IOMMU_PPN_FIELD ++ ++/** ++ * struct riscv_iommu_dc - Device Context ++ * @tc: Translation Control ++ * @iohgatp: I/O Hypervisor guest address translation and protection ++ * (Second stage context) ++ * @ta: Translation Attributes ++ * @fsc: First stage context ++ * @msiptp: MSI page table pointer ++ * @msi_addr_mask: MSI address mask ++ * @msi_addr_pattern: MSI address pattern ++ * @_reserved: Reserved for future use, padding ++ * ++ * This structure is used for leaf nodes on the Device Directory Table, ++ * in case RISCV_IOMMU_CAPABILITIES_MSI_FLAT is not set, the bottom 4 fields ++ * are not present and are skipped with pointer arithmetic to avoid ++ * casting, check out riscv_iommu_get_dc(). 
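
The kernel-doc above notes that without RISCV_IOMMU_CAPABILITIES_MSI_FLAT the bottom four fields are absent and leaf entries are reached by pointer arithmetic. A minimal sketch of that indexing (illustrative only, not part of this patch; the helper name is hypothetical):

/* Sketch: 8 x 64-bit words per device context with MSI_FLAT, 4 x 64-bit without. */
static inline struct riscv_iommu_dc *dc_leaf_entry(u64 *leaf, unsigned int ddi0,
						   bool msi_flat)
{
	return (struct riscv_iommu_dc *)(leaf + (size_t)ddi0 * (msi_flat ? 8 : 4));
}
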
++ * See section 2.1 for more details ++ */ ++struct riscv_iommu_dc { ++ u64 tc; ++ u64 iohgatp; ++ u64 ta; ++ u64 fsc; ++ u64 msiptp; ++ u64 msi_addr_mask; ++ u64 msi_addr_pattern; ++ u64 _reserved; ++}; ++ ++/* Translation control fields */ ++#define RISCV_IOMMU_DC_TC_V BIT_ULL(0) ++#define RISCV_IOMMU_DC_TC_EN_ATS BIT_ULL(1) ++#define RISCV_IOMMU_DC_TC_EN_PRI BIT_ULL(2) ++#define RISCV_IOMMU_DC_TC_T2GPA BIT_ULL(3) ++#define RISCV_IOMMU_DC_TC_DTF BIT_ULL(4) ++#define RISCV_IOMMU_DC_TC_PDTV BIT_ULL(5) ++#define RISCV_IOMMU_DC_TC_PRPR BIT_ULL(6) ++#define RISCV_IOMMU_DC_TC_GADE BIT_ULL(7) ++#define RISCV_IOMMU_DC_TC_SADE BIT_ULL(8) ++#define RISCV_IOMMU_DC_TC_DPE BIT_ULL(9) ++#define RISCV_IOMMU_DC_TC_SBE BIT_ULL(10) ++#define RISCV_IOMMU_DC_TC_SXL BIT_ULL(11) ++ ++/* Second-stage (aka G-stage) context fields */ ++#define RISCV_IOMMU_DC_IOHGATP_PPN RISCV_IOMMU_ATP_PPN_FIELD ++#define RISCV_IOMMU_DC_IOHGATP_GSCID GENMASK_ULL(59, 44) ++#define RISCV_IOMMU_DC_IOHGATP_MODE RISCV_IOMMU_ATP_MODE_FIELD ++ ++/** ++ * enum riscv_iommu_dc_iohgatp_modes - Guest address translation/protection modes ++ * @RISCV_IOMMU_DC_IOHGATP_MODE_BARE: No translation/protection ++ * @RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4: Sv32x4 (2-bit extension of Sv32), when fctl.GXL == 1 ++ * @RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4: Sv39x4 (2-bit extension of Sv39), when fctl.GXL == 0 ++ * @RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4: Sv48x4 (2-bit extension of Sv48), when fctl.GXL == 0 ++ * @RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4: Sv57x4 (2-bit extension of Sv57), when fctl.GXL == 0 ++ */ ++enum riscv_iommu_dc_iohgatp_modes { ++ RISCV_IOMMU_DC_IOHGATP_MODE_BARE = 0, ++ RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4 = 8, ++ RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4 = 8, ++ RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4 = 9, ++ RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4 = 10 ++}; ++ ++/* Translation attributes fields */ ++#define RISCV_IOMMU_DC_TA_PSCID GENMASK_ULL(31, 12) ++ ++/* First-stage context fields */ ++#define RISCV_IOMMU_DC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD ++#define RISCV_IOMMU_DC_FSC_MODE RISCV_IOMMU_ATP_MODE_FIELD ++ ++/** ++ * enum riscv_iommu_dc_fsc_atp_modes - First stage address translation/protection modes ++ * @RISCV_IOMMU_DC_FSC_MODE_BARE: No translation/protection ++ * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32: Sv32, when dc.tc.SXL == 1 ++ * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39: Sv39, when dc.tc.SXL == 0 ++ * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48: Sv48, when dc.tc.SXL == 0 ++ * @RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57: Sv57, when dc.tc.SXL == 0 ++ * @RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8: 1lvl PDT, 8bit process ids ++ * @RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17: 2lvl PDT, 17bit process ids ++ * @RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20: 3lvl PDT, 20bit process ids ++ * ++ * FSC holds IOSATP when RISCV_IOMMU_DC_TC_PDTV is 0 and PDTP otherwise. ++ * IOSATP controls the first stage address translation (same as the satp register on ++ * the RISC-V MMU), and PDTP holds the process directory table, used to select a ++ * first stage page table based on a process id (for devices that support multiple ++ * process ids). 
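
As a concrete reading of the paragraph above: with RISCV_IOMMU_DC_TC_PDTV clear, software would encode a first-stage page-table root and mode into FSC roughly as sketched below (hypothetical helper; Sv39 and 4 KiB pages assumed):

/* Sketch only: build dc.fsc as an IOSATP value for an Sv39 root. */
static inline u64 dc_fsc_iosatp_sv39(phys_addr_t pgd_phys)
{
	return FIELD_PREP(RISCV_IOMMU_DC_FSC_MODE,
			  RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39) |
	       FIELD_PREP(RISCV_IOMMU_DC_FSC_PPN, pgd_phys >> PAGE_SHIFT);
}
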
++ */ ++enum riscv_iommu_dc_fsc_atp_modes { ++ RISCV_IOMMU_DC_FSC_MODE_BARE = 0, ++ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32 = 8, ++ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 = 8, ++ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48 = 9, ++ RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57 = 10, ++ RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8 = 1, ++ RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17 = 2, ++ RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20 = 3 ++}; ++ ++/* MSI page table pointer */ ++#define RISCV_IOMMU_DC_MSIPTP_PPN RISCV_IOMMU_ATP_PPN_FIELD ++#define RISCV_IOMMU_DC_MSIPTP_MODE RISCV_IOMMU_ATP_MODE_FIELD ++#define RISCV_IOMMU_DC_MSIPTP_MODE_OFF 0 ++#define RISCV_IOMMU_DC_MSIPTP_MODE_FLAT 1 ++ ++/* MSI address mask */ ++#define RISCV_IOMMU_DC_MSI_ADDR_MASK GENMASK_ULL(51, 0) ++ ++/* MSI address pattern */ ++#define RISCV_IOMMU_DC_MSI_PATTERN GENMASK_ULL(51, 0) ++ ++/** ++ * struct riscv_iommu_pc - Process Context ++ * @ta: Translation Attributes ++ * @fsc: First stage context ++ * ++ * This structure is used for leaf nodes on the Process Directory Table ++ * See section 2.3 for more details ++ */ ++struct riscv_iommu_pc { ++ u64 ta; ++ u64 fsc; ++}; ++ ++/* Translation attributes fields */ ++#define RISCV_IOMMU_PC_TA_V BIT_ULL(0) ++#define RISCV_IOMMU_PC_TA_ENS BIT_ULL(1) ++#define RISCV_IOMMU_PC_TA_SUM BIT_ULL(2) ++#define RISCV_IOMMU_PC_TA_PSCID GENMASK_ULL(31, 12) ++ ++/* First stage context fields */ ++#define RISCV_IOMMU_PC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD ++#define RISCV_IOMMU_PC_FSC_MODE RISCV_IOMMU_ATP_MODE_FIELD ++ ++/* ++ * Chapter 3: In-memory queue interface ++ */ ++ ++/** ++ * struct riscv_iommu_command - Generic IOMMU command structure ++ * @dword0: Includes the opcode and the function identifier ++ * @dword1: Opcode specific data ++ * ++ * The commands are interpreted as two 64bit fields, where the first ++ * 7bits of the first field are the opcode which also defines the ++ * command's format, followed by a 3bit field that specifies the ++ * function invoked by that command, and the rest is opcode-specific. ++ * This is a generic struct which will be populated differently ++ * according to each command. For more infos on the commands and ++ * the command queue check section 3.1. ++ */ ++struct riscv_iommu_command { ++ u64 dword0; ++ u64 dword1; ++}; ++ ++/* Fields on dword0, common for all commands */ ++#define RISCV_IOMMU_CMD_OPCODE GENMASK_ULL(6, 0) ++#define RISCV_IOMMU_CMD_FUNC GENMASK_ULL(9, 7) ++ ++/* 3.1.1 IOMMU Page-table cache invalidation */ ++/* Fields on dword0 */ ++#define RISCV_IOMMU_CMD_IOTINVAL_OPCODE 1 ++#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA 0 ++#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA 1 ++#define RISCV_IOMMU_CMD_IOTINVAL_AV BIT_ULL(10) ++#define RISCV_IOMMU_CMD_IOTINVAL_PSCID GENMASK_ULL(31, 12) ++#define RISCV_IOMMU_CMD_IOTINVAL_PSCV BIT_ULL(32) ++#define RISCV_IOMMU_CMD_IOTINVAL_GV BIT_ULL(33) ++#define RISCV_IOMMU_CMD_IOTINVAL_GSCID GENMASK_ULL(59, 44) ++/* dword1[61:10] is the 4K-aligned page address */ ++#define RISCV_IOMMU_CMD_IOTINVAL_ADDR GENMASK_ULL(61, 10) ++ ++/* 3.1.2 IOMMU Command Queue Fences */ ++/* Fields on dword0 */ ++#define RISCV_IOMMU_CMD_IOFENCE_OPCODE 2 ++#define RISCV_IOMMU_CMD_IOFENCE_FUNC_C 0 ++#define RISCV_IOMMU_CMD_IOFENCE_AV BIT_ULL(10) ++#define RISCV_IOMMU_CMD_IOFENCE_WSI BIT_ULL(11) ++#define RISCV_IOMMU_CMD_IOFENCE_PR BIT_ULL(12) ++#define RISCV_IOMMU_CMD_IOFENCE_PW BIT_ULL(13) ++#define RISCV_IOMMU_CMD_IOFENCE_DATA GENMASK_ULL(63, 32) ++/* dword1 is the address, word-size aligned and shifted to the right by two bits. 
*/ ++ ++/* 3.1.3 IOMMU Directory cache invalidation */ ++/* Fields on dword0 */ ++#define RISCV_IOMMU_CMD_IODIR_OPCODE 3 ++#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT 0 ++#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT 1 ++#define RISCV_IOMMU_CMD_IODIR_PID GENMASK_ULL(31, 12) ++#define RISCV_IOMMU_CMD_IODIR_DV BIT_ULL(33) ++#define RISCV_IOMMU_CMD_IODIR_DID GENMASK_ULL(63, 40) ++/* dword1 is reserved for standard use */ ++ ++/* 3.1.4 IOMMU PCIe ATS */ ++/* Fields on dword0 */ ++#define RISCV_IOMMU_CMD_ATS_OPCODE 4 ++#define RISCV_IOMMU_CMD_ATS_FUNC_INVAL 0 ++#define RISCV_IOMMU_CMD_ATS_FUNC_PRGR 1 ++#define RISCV_IOMMU_CMD_ATS_PID GENMASK_ULL(31, 12) ++#define RISCV_IOMMU_CMD_ATS_PV BIT_ULL(32) ++#define RISCV_IOMMU_CMD_ATS_DSV BIT_ULL(33) ++#define RISCV_IOMMU_CMD_ATS_RID GENMASK_ULL(55, 40) ++#define RISCV_IOMMU_CMD_ATS_DSEG GENMASK_ULL(63, 56) ++/* dword1 is the ATS payload, two different payload types for INVAL and PRGR */ ++ ++/* ATS.INVAL payload*/ ++#define RISCV_IOMMU_CMD_ATS_INVAL_G BIT_ULL(0) ++/* Bits 1 - 10 are zeroed */ ++#define RISCV_IOMMU_CMD_ATS_INVAL_S BIT_ULL(11) ++#define RISCV_IOMMU_CMD_ATS_INVAL_UADDR GENMASK_ULL(63, 12) ++ ++/* ATS.PRGR payload */ ++/* Bits 0 - 31 are zeroed */ ++#define RISCV_IOMMU_CMD_ATS_PRGR_PRG_INDEX GENMASK_ULL(40, 32) ++/* Bits 41 - 43 are zeroed */ ++#define RISCV_IOMMU_CMD_ATS_PRGR_RESP_CODE GENMASK_ULL(47, 44) ++#define RISCV_IOMMU_CMD_ATS_PRGR_DST_ID GENMASK_ULL(63, 48) ++ ++/** ++ * struct riscv_iommu_fq_record - Fault/Event Queue Record ++ * @hdr: Header, includes fault/event cause, PID/DID, transaction type etc ++ * @_reserved: Low 32bits for custom use, high 32bits for standard use ++ * @iotval: Transaction-type/cause specific format ++ * @iotval2: Cause specific format ++ * ++ * The fault/event queue reports events and failures raised when ++ * processing transactions. Each record is a 32byte structure where ++ * the first dword has a fixed format for providing generic infos ++ * regarding the fault/event, and two more dwords are there for ++ * fault/event-specific information. For more details see section ++ * 3.2. 
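
A minimal decode of the fixed-format header dword described above, mirroring what riscv_iommu_fault() in iommu.c does later in this patch (sketch only; the helper name is hypothetical):

/* Sketch only: extract the generic fields of a fault/event record. */
static inline void fq_record_hdr_decode(const struct riscv_iommu_fq_record *rec,
					unsigned int *cause, unsigned int *devid)
{
	*cause = FIELD_GET(RISCV_IOMMU_FQ_HDR_CAUSE, rec->hdr);
	*devid = FIELD_GET(RISCV_IOMMU_FQ_HDR_DID, rec->hdr);
}
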
++ */ ++struct riscv_iommu_fq_record { ++ u64 hdr; ++ u64 _reserved; ++ u64 iotval; ++ u64 iotval2; ++}; ++ ++/* Fields on header */ ++#define RISCV_IOMMU_FQ_HDR_CAUSE GENMASK_ULL(11, 0) ++#define RISCV_IOMMU_FQ_HDR_PID GENMASK_ULL(31, 12) ++#define RISCV_IOMMU_FQ_HDR_PV BIT_ULL(32) ++#define RISCV_IOMMU_FQ_HDR_PRIV BIT_ULL(33) ++#define RISCV_IOMMU_FQ_HDR_TTYP GENMASK_ULL(39, 34) ++#define RISCV_IOMMU_FQ_HDR_DID GENMASK_ULL(63, 40) ++ ++/** ++ * enum riscv_iommu_fq_causes - Fault/event cause values ++ * @RISCV_IOMMU_FQ_CAUSE_INST_FAULT: Instruction access fault ++ * @RISCV_IOMMU_FQ_CAUSE_RD_ADDR_MISALIGNED: Read address misaligned ++ * @RISCV_IOMMU_FQ_CAUSE_RD_FAULT: Read load fault ++ * @RISCV_IOMMU_FQ_CAUSE_WR_ADDR_MISALIGNED: Write/AMO address misaligned ++ * @RISCV_IOMMU_FQ_CAUSE_WR_FAULT: Write/AMO access fault ++ * @RISCV_IOMMU_FQ_CAUSE_INST_FAULT_S: Instruction page fault ++ * @RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S: Read page fault ++ * @RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S: Write/AMO page fault ++ * @RISCV_IOMMU_FQ_CAUSE_INST_FAULT_VS: Instruction guest page fault ++ * @RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS: Read guest page fault ++ * @RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS: Write/AMO guest page fault ++ * @RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED: All inbound transactions disallowed ++ * @RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT: DDT entry load access fault ++ * @RISCV_IOMMU_FQ_CAUSE_DDT_INVALID: DDT entry invalid ++ * @RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED: DDT entry misconfigured ++ * @RISCV_IOMMU_FQ_CAUSE_TTYP_BLOCKED: Transaction type disallowed ++ * @RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT: MSI PTE load access fault ++ * @RISCV_IOMMU_FQ_CAUSE_MSI_INVALID: MSI PTE invalid ++ * @RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED: MSI PTE misconfigured ++ * @RISCV_IOMMU_FQ_CAUSE_MRIF_FAULT: MRIF access fault ++ * @RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT: PDT entry load access fault ++ * @RISCV_IOMMU_FQ_CAUSE_PDT_INVALID: PDT entry invalid ++ * @RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED: PDT entry misconfigured ++ * @RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED: DDT data corruption ++ * @RISCV_IOMMU_FQ_CAUSE_PDT_CORRUPTED: PDT data corruption ++ * @RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED: MSI page table data corruption ++ * @RISCV_IOMMU_FQ_CAUSE_MRIF_CORRUIPTED: MRIF data corruption ++ * @RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR: Internal data path error ++ * @RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT: IOMMU MSI write access fault ++ * @RISCV_IOMMU_FQ_CAUSE_PT_CORRUPTED: First/second stage page table data corruption ++ * ++ * Values are on table 11 of the spec, encodings 275 - 2047 are reserved for standard ++ * use, and 2048 - 4095 for custom use. 
++ */ ++enum riscv_iommu_fq_causes { ++ RISCV_IOMMU_FQ_CAUSE_INST_FAULT = 1, ++ RISCV_IOMMU_FQ_CAUSE_RD_ADDR_MISALIGNED = 4, ++ RISCV_IOMMU_FQ_CAUSE_RD_FAULT = 5, ++ RISCV_IOMMU_FQ_CAUSE_WR_ADDR_MISALIGNED = 6, ++ RISCV_IOMMU_FQ_CAUSE_WR_FAULT = 7, ++ RISCV_IOMMU_FQ_CAUSE_INST_FAULT_S = 12, ++ RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S = 13, ++ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S = 15, ++ RISCV_IOMMU_FQ_CAUSE_INST_FAULT_VS = 20, ++ RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS = 21, ++ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS = 23, ++ RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED = 256, ++ RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT = 257, ++ RISCV_IOMMU_FQ_CAUSE_DDT_INVALID = 258, ++ RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED = 259, ++ RISCV_IOMMU_FQ_CAUSE_TTYP_BLOCKED = 260, ++ RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT = 261, ++ RISCV_IOMMU_FQ_CAUSE_MSI_INVALID = 262, ++ RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED = 263, ++ RISCV_IOMMU_FQ_CAUSE_MRIF_FAULT = 264, ++ RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT = 265, ++ RISCV_IOMMU_FQ_CAUSE_PDT_INVALID = 266, ++ RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED = 267, ++ RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED = 268, ++ RISCV_IOMMU_FQ_CAUSE_PDT_CORRUPTED = 269, ++ RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED = 270, ++ RISCV_IOMMU_FQ_CAUSE_MRIF_CORRUIPTED = 271, ++ RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR = 272, ++ RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT = 273, ++ RISCV_IOMMU_FQ_CAUSE_PT_CORRUPTED = 274 ++}; ++ ++/** ++ * enum riscv_iommu_fq_ttypes: Fault/event transaction types ++ * @RISCV_IOMMU_FQ_TTYP_NONE: None. Fault not caused by an inbound transaction. ++ * @RISCV_IOMMU_FQ_TTYP_UADDR_INST_FETCH: Instruction fetch from untranslated address ++ * @RISCV_IOMMU_FQ_TTYP_UADDR_RD: Read from untranslated address ++ * @RISCV_IOMMU_FQ_TTYP_UADDR_WR: Write/AMO to untranslated address ++ * @RISCV_IOMMU_FQ_TTYP_TADDR_INST_FETCH: Instruction fetch from translated address ++ * @RISCV_IOMMU_FQ_TTYP_TADDR_RD: Read from translated address ++ * @RISCV_IOMMU_FQ_TTYP_TADDR_WR: Write/AMO to translated address ++ * @RISCV_IOMMU_FQ_TTYP_PCIE_ATS_REQ: PCIe ATS translation request ++ * @RISCV_IOMMU_FQ_TTYP_PCIE_MSG_REQ: PCIe message request ++ * ++ * Values are on table 12 of the spec, type 4 and 10 - 31 are reserved for standard use ++ * and 31 - 63 for custom use. ++ */ ++enum riscv_iommu_fq_ttypes { ++ RISCV_IOMMU_FQ_TTYP_NONE = 0, ++ RISCV_IOMMU_FQ_TTYP_UADDR_INST_FETCH = 1, ++ RISCV_IOMMU_FQ_TTYP_UADDR_RD = 2, ++ RISCV_IOMMU_FQ_TTYP_UADDR_WR = 3, ++ RISCV_IOMMU_FQ_TTYP_TADDR_INST_FETCH = 5, ++ RISCV_IOMMU_FQ_TTYP_TADDR_RD = 6, ++ RISCV_IOMMU_FQ_TTYP_TADDR_WR = 7, ++ RISCV_IOMMU_FQ_TTYP_PCIE_ATS_REQ = 8, ++ RISCV_IOMMU_FQ_TTYP_PCIE_MSG_REQ = 9, ++}; ++ ++/** ++ * struct riscv_iommu_pq_record - PCIe Page Request record ++ * @hdr: Header, includes PID, DID etc ++ * @payload: Holds the page address, request group and permission bits ++ * ++ * For more infos on the PCIe Page Request queue see chapter 3.3. 
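
For orientation, the payload described above could be unpacked as sketched below (illustrative only; hypothetical helper, 4 KiB page size assumed):

/* Sketch only: recover the requested page address from a page request. */
static inline u64 pq_payload_page_addr(const struct riscv_iommu_pq_record *rec)
{
	/* PRGI sits in bits 11:3, the R/W/L permission bits in 2:0. */
	return FIELD_GET(RISCV_IOMMU_PQ_PAYLOAD_ADDR, rec->payload) << PAGE_SHIFT;
}
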
++ */ ++struct riscv_iommu_pq_record { ++ u64 hdr; ++ u64 payload; ++}; ++ ++/* Header fields */ ++#define RISCV_IOMMU_PQ_HDR_PID GENMASK_ULL(31, 12) ++#define RISCV_IOMMU_PQ_HDR_PV BIT_ULL(32) ++#define RISCV_IOMMU_PQ_HDR_PRIV BIT_ULL(33) ++#define RISCV_IOMMU_PQ_HDR_EXEC BIT_ULL(34) ++#define RISCV_IOMMU_PQ_HDR_DID GENMASK_ULL(63, 40) ++ ++/* Payload fields */ ++#define RISCV_IOMMU_PQ_PAYLOAD_R BIT_ULL(0) ++#define RISCV_IOMMU_PQ_PAYLOAD_W BIT_ULL(1) ++#define RISCV_IOMMU_PQ_PAYLOAD_L BIT_ULL(2) ++#define RISCV_IOMMU_PQ_PAYLOAD_RWL_MASK GENMASK_ULL(2, 0) ++#define RISCV_IOMMU_PQ_PAYLOAD_PRGI GENMASK_ULL(11, 3) /* Page Request Group Index */ ++#define RISCV_IOMMU_PQ_PAYLOAD_ADDR GENMASK_ULL(63, 12) ++ ++/** ++ * struct riscv_iommu_msipte - MSI Page Table Entry ++ * @pte: MSI PTE ++ * @mrif_info: Memory-resident interrupt file info ++ * ++ * The MSI Page Table is used for virtualizing MSIs, so that when ++ * a device sends an MSI to a guest, the IOMMU can reroute it ++ * by translating the MSI address, either to a guest interrupt file ++ * or a memory resident interrupt file (MRIF). Note that this page table ++ * is an array of MSI PTEs, not a multi-level pt, each entry ++ * is a leaf entry. For more infos check out the AIA spec, chapter 9.5. ++ * ++ * Also in basic mode the mrif_info field is ignored by the IOMMU and can ++ * be used by software, any other reserved fields on pte must be zeroed-out ++ * by software. ++ */ ++struct riscv_iommu_msipte { ++ u64 pte; ++ u64 mrif_info; ++}; ++ ++/* Fields on pte */ ++#define RISCV_IOMMU_MSIPTE_V BIT_ULL(0) ++#define RISCV_IOMMU_MSIPTE_M GENMASK_ULL(2, 1) ++#define RISCV_IOMMU_MSIPTE_MRIF_ADDR GENMASK_ULL(53, 7) /* When M == 1 (MRIF mode) */ ++#define RISCV_IOMMU_MSIPTE_PPN RISCV_IOMMU_PPN_FIELD /* When M == 3 (basic mode) */ ++#define RISCV_IOMMU_MSIPTE_C BIT_ULL(63) ++ ++/* Fields on mrif_info */ ++#define RISCV_IOMMU_MSIPTE_MRIF_NID GENMASK_ULL(9, 0) ++#define RISCV_IOMMU_MSIPTE_MRIF_NPPN RISCV_IOMMU_PPN_FIELD ++#define RISCV_IOMMU_MSIPTE_MRIF_NID_MSB BIT_ULL(60) ++ ++/* Helper functions: command structure builders. 
*/ ++ ++static inline void riscv_iommu_cmd_inval_vma(struct riscv_iommu_command *cmd) ++{ ++ cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IOTINVAL_OPCODE) | ++ FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA); ++ cmd->dword1 = 0; ++} ++ ++static inline void riscv_iommu_cmd_inval_set_addr(struct riscv_iommu_command *cmd, ++ u64 addr) ++{ ++ cmd->dword1 = FIELD_PREP(RISCV_IOMMU_CMD_IOTINVAL_ADDR, phys_to_pfn(addr)); ++ cmd->dword0 |= RISCV_IOMMU_CMD_IOTINVAL_AV; ++} ++ ++static inline void riscv_iommu_cmd_inval_set_pscid(struct riscv_iommu_command *cmd, ++ int pscid) ++{ ++ cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IOTINVAL_PSCID, pscid) | ++ RISCV_IOMMU_CMD_IOTINVAL_PSCV; ++} ++ ++static inline void riscv_iommu_cmd_inval_set_gscid(struct riscv_iommu_command *cmd, ++ int gscid) ++{ ++ cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IOTINVAL_GSCID, gscid) | ++ RISCV_IOMMU_CMD_IOTINVAL_GV; ++} ++ ++static inline void riscv_iommu_cmd_iofence(struct riscv_iommu_command *cmd) ++{ ++ cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IOFENCE_OPCODE) | ++ FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IOFENCE_FUNC_C) | ++ RISCV_IOMMU_CMD_IOFENCE_PR | RISCV_IOMMU_CMD_IOFENCE_PW; ++ cmd->dword1 = 0; ++} ++ ++static inline void riscv_iommu_cmd_iofence_set_av(struct riscv_iommu_command *cmd, ++ u64 addr, u32 data) ++{ ++ cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IOFENCE_OPCODE) | ++ FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IOFENCE_FUNC_C) | ++ FIELD_PREP(RISCV_IOMMU_CMD_IOFENCE_DATA, data) | ++ RISCV_IOMMU_CMD_IOFENCE_AV; ++ cmd->dword1 = addr >> 2; ++} ++ ++static inline void riscv_iommu_cmd_iodir_inval_ddt(struct riscv_iommu_command *cmd) ++{ ++ cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IODIR_OPCODE) | ++ FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT); ++ cmd->dword1 = 0; ++} ++ ++static inline void riscv_iommu_cmd_iodir_inval_pdt(struct riscv_iommu_command *cmd) ++{ ++ cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IODIR_OPCODE) | ++ FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT); ++ cmd->dword1 = 0; ++} ++ ++static inline void riscv_iommu_cmd_iodir_set_did(struct riscv_iommu_command *cmd, ++ unsigned int devid) ++{ ++ cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IODIR_DID, devid) | ++ RISCV_IOMMU_CMD_IODIR_DV; ++} ++ ++static inline void riscv_iommu_cmd_iodir_set_pid(struct riscv_iommu_command *cmd, ++ unsigned int pasid) ++{ ++ cmd->dword0 |= FIELD_PREP(RISCV_IOMMU_CMD_IODIR_PID, pasid); ++} ++ ++#endif /* _RISCV_IOMMU_BITS_H_ */ +diff --git a/drivers/iommu/riscv/iommu-pci.c b/drivers/iommu/riscv/iommu-pci.c +new file mode 100644 +index 000000000000..c7a89143014c +--- /dev/null ++++ b/drivers/iommu/riscv/iommu-pci.c +@@ -0,0 +1,120 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++ ++/* ++ * Copyright © 2022-2024 Rivos Inc. ++ * Copyright © 2023 FORTH-ICS/CARV ++ * ++ * RISCV IOMMU as a PCIe device ++ * ++ * Authors ++ * Tomasz Jeznach ++ * Nick Kossifidis ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "iommu-bits.h" ++#include "iommu.h" ++ ++/* QEMU RISC-V IOMMU implementation */ ++#define PCI_DEVICE_ID_REDHAT_RISCV_IOMMU 0x0014 ++ ++/* Rivos Inc. 
assigned PCI Vendor and Device IDs */ ++#ifndef PCI_VENDOR_ID_RIVOS ++#define PCI_VENDOR_ID_RIVOS 0x1efd ++#endif ++ ++#define PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA 0x0008 ++ ++static int riscv_iommu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ struct device *dev = &pdev->dev; ++ struct riscv_iommu_device *iommu; ++ int rc, vec; ++ ++ rc = pcim_enable_device(pdev); ++ if (rc) ++ return rc; ++ ++ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) ++ return -ENODEV; ++ ++ if (pci_resource_len(pdev, 0) < RISCV_IOMMU_REG_SIZE) ++ return -ENODEV; ++ ++ rc = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); ++ if (rc) ++ return dev_err_probe(dev, rc, "pcim_iomap_regions failed\n"); ++ ++ iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); ++ if (!iommu) ++ return -ENOMEM; ++ ++ iommu->dev = dev; ++ iommu->reg = pcim_iomap_table(pdev)[0]; ++ ++ pci_set_master(pdev); ++ dev_set_drvdata(dev, iommu); ++ ++ /* Check device reported capabilities / features. */ ++ iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES); ++ iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); ++ ++ /* The PCI driver only uses MSIs, make sure the IOMMU supports this */ ++ switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) { ++ case RISCV_IOMMU_CAPABILITIES_IGS_MSI: ++ case RISCV_IOMMU_CAPABILITIES_IGS_BOTH: ++ break; ++ default: ++ return dev_err_probe(dev, -ENODEV, ++ "unable to use message-signaled interrupts\n"); ++ } ++ ++ /* Allocate and assign IRQ vectors for the various events */ ++ rc = pci_alloc_irq_vectors(pdev, 1, RISCV_IOMMU_INTR_COUNT, ++ PCI_IRQ_MSIX | PCI_IRQ_MSI); ++ if (rc <= 0) ++ return dev_err_probe(dev, -ENODEV, ++ "unable to allocate irq vectors\n"); ++ ++ iommu->irqs_count = rc; ++ for (vec = 0; vec < iommu->irqs_count; vec++) ++ iommu->irqs[vec] = msi_get_virq(dev, vec); ++ ++ /* Enable message-signaled interrupts, fctl.WSI */ ++ if (iommu->fctl & RISCV_IOMMU_FCTL_WSI) { ++ iommu->fctl ^= RISCV_IOMMU_FCTL_WSI; ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl); ++ } ++ ++ return riscv_iommu_init(iommu); ++} ++ ++static void riscv_iommu_pci_remove(struct pci_dev *pdev) ++{ ++ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev); ++ ++ riscv_iommu_remove(iommu); ++} ++ ++static const struct pci_device_id riscv_iommu_pci_tbl[] = { ++ {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), 0}, ++ {PCI_VDEVICE(RIVOS, PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA), 0}, ++ {0,} ++}; ++ ++static struct pci_driver riscv_iommu_pci_driver = { ++ .name = KBUILD_MODNAME, ++ .id_table = riscv_iommu_pci_tbl, ++ .probe = riscv_iommu_pci_probe, ++ .remove = riscv_iommu_pci_remove, ++ .driver = { ++ .suppress_bind_attrs = true, ++ }, ++}; ++ ++builtin_pci_driver(riscv_iommu_pci_driver); +diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c +new file mode 100644 +index 000000000000..da336863f152 +--- /dev/null ++++ b/drivers/iommu/riscv/iommu-platform.c +@@ -0,0 +1,92 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * RISC-V IOMMU as a platform device ++ * ++ * Copyright © 2023 FORTH-ICS/CARV ++ * Copyright © 2023-2024 Rivos Inc. 
++ * ++ * Authors ++ * Nick Kossifidis ++ * Tomasz Jeznach ++ */ ++ ++#include ++#include ++#include ++ ++#include "iommu-bits.h" ++#include "iommu.h" ++ ++static int riscv_iommu_platform_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct riscv_iommu_device *iommu = NULL; ++ struct resource *res = NULL; ++ int vec; ++ ++ iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); ++ if (!iommu) ++ return -ENOMEM; ++ ++ iommu->dev = dev; ++ iommu->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); ++ if (IS_ERR(iommu->reg)) ++ return dev_err_probe(dev, PTR_ERR(iommu->reg), ++ "could not map register region\n"); ++ ++ dev_set_drvdata(dev, iommu); ++ ++ /* Check device reported capabilities / features. */ ++ iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES); ++ iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); ++ ++ /* For now we only support WSI */ ++ switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) { ++ case RISCV_IOMMU_CAPABILITIES_IGS_WSI: ++ case RISCV_IOMMU_CAPABILITIES_IGS_BOTH: ++ break; ++ default: ++ return dev_err_probe(dev, -ENODEV, ++ "unable to use wire-signaled interrupts\n"); ++ } ++ ++ iommu->irqs_count = platform_irq_count(pdev); ++ if (iommu->irqs_count <= 0) ++ return dev_err_probe(dev, -ENODEV, ++ "no IRQ resources provided\n"); ++ if (iommu->irqs_count > RISCV_IOMMU_INTR_COUNT) ++ iommu->irqs_count = RISCV_IOMMU_INTR_COUNT; ++ ++ for (vec = 0; vec < iommu->irqs_count; vec++) ++ iommu->irqs[vec] = platform_get_irq(pdev, vec); ++ ++ /* Enable wire-signaled interrupts, fctl.WSI */ ++ if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) { ++ iommu->fctl |= RISCV_IOMMU_FCTL_WSI; ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl); ++ } ++ ++ return riscv_iommu_init(iommu); ++}; ++ ++static void riscv_iommu_platform_remove(struct platform_device *pdev) ++{ ++ riscv_iommu_remove(dev_get_drvdata(&pdev->dev)); ++}; ++ ++static const struct of_device_id riscv_iommu_of_match[] = { ++ {.compatible = "riscv,iommu",}, ++ {}, ++}; ++ ++static struct platform_driver riscv_iommu_platform_driver = { ++ .probe = riscv_iommu_platform_probe, ++ .remove_new = riscv_iommu_platform_remove, ++ .driver = { ++ .name = "riscv,iommu", ++ .of_match_table = riscv_iommu_of_match, ++ .suppress_bind_attrs = true, ++ }, ++}; ++ ++builtin_platform_driver(riscv_iommu_platform_driver); +diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c +new file mode 100644 +index 000000000000..8a05def774bd +--- /dev/null ++++ b/drivers/iommu/riscv/iommu.c +@@ -0,0 +1,1661 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * IOMMU API for RISC-V IOMMU implementations. ++ * ++ * Copyright © 2022-2024 Rivos Inc. 
++ * Copyright © 2023 FORTH-ICS/CARV ++ * ++ * Authors ++ * Tomasz Jeznach ++ * Nick Kossifidis ++ */ ++ ++#define pr_fmt(fmt) "riscv-iommu: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../iommu-pages.h" ++#include "iommu-bits.h" ++#include "iommu.h" ++ ++/* Timeouts in [us] */ ++#define RISCV_IOMMU_QCSR_TIMEOUT 150000 ++#define RISCV_IOMMU_QUEUE_TIMEOUT 150000 ++#define RISCV_IOMMU_DDTP_TIMEOUT 10000000 ++#define RISCV_IOMMU_IOTINVAL_TIMEOUT 90000000 ++ ++/* Number of entries per CMD/FLT queue, should be <= INT_MAX */ ++#define RISCV_IOMMU_DEF_CQ_COUNT 8192 ++#define RISCV_IOMMU_DEF_FQ_COUNT 4096 ++ ++/* RISC-V IOMMU PPN <> PHYS address conversions, PHYS <=> PPN[53:10] */ ++#define phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10)) ++#define ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12)) ++ ++#define dev_to_iommu(dev) \ ++ iommu_get_iommu_dev(dev, struct riscv_iommu_device, iommu) ++ ++/* IOMMU PSCID allocation namespace. */ ++static DEFINE_IDA(riscv_iommu_pscids); ++#define RISCV_IOMMU_MAX_PSCID (BIT(20) - 1) ++ ++/* Device resource-managed allocations */ ++struct riscv_iommu_devres { ++ void *addr; ++ int order; ++}; ++ ++static void riscv_iommu_devres_pages_release(struct device *dev, void *res) ++{ ++ struct riscv_iommu_devres *devres = res; ++ ++ iommu_free_pages(devres->addr, devres->order); ++} ++ ++static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p) ++{ ++ struct riscv_iommu_devres *devres = res; ++ struct riscv_iommu_devres *target = p; ++ ++ return devres->addr == target->addr; ++} ++ ++static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order) ++{ ++ struct riscv_iommu_devres *devres; ++ void *addr; ++ ++ addr = iommu_alloc_pages_node(dev_to_node(iommu->dev), ++ GFP_KERNEL_ACCOUNT, order); ++ if (unlikely(!addr)) ++ return NULL; ++ ++ devres = devres_alloc(riscv_iommu_devres_pages_release, ++ sizeof(struct riscv_iommu_devres), GFP_KERNEL); ++ ++ if (unlikely(!devres)) { ++ iommu_free_pages(addr, order); ++ return NULL; ++ } ++ ++ devres->addr = addr; ++ devres->order = order; ++ ++ devres_add(iommu->dev, devres); ++ ++ return addr; ++} ++ ++static void riscv_iommu_free_pages(struct riscv_iommu_device *iommu, void *addr) ++{ ++ struct riscv_iommu_devres devres = { .addr = addr }; ++ ++ devres_release(iommu->dev, riscv_iommu_devres_pages_release, ++ riscv_iommu_devres_pages_match, &devres); ++} ++ ++/* ++ * Hardware queue allocation and management. ++ */ ++ ++/* Setup queue base, control registers and default queue length */ ++#define RISCV_IOMMU_QUEUE_INIT(q, name) do { \ ++ struct riscv_iommu_queue *_q = q; \ ++ _q->qid = RISCV_IOMMU_INTR_ ## name; \ ++ _q->qbr = RISCV_IOMMU_REG_ ## name ## B; \ ++ _q->qcr = RISCV_IOMMU_REG_ ## name ## CSR; \ ++ _q->mask = _q->mask ?: (RISCV_IOMMU_DEF_ ## name ## _COUNT) - 1;\ ++} while (0) ++ ++/* Note: offsets are the same for all queues */ ++#define Q_HEAD(q) ((q)->qbr + (RISCV_IOMMU_REG_CQH - RISCV_IOMMU_REG_CQB)) ++#define Q_TAIL(q) ((q)->qbr + (RISCV_IOMMU_REG_CQT - RISCV_IOMMU_REG_CQB)) ++#define Q_ITEM(q, index) ((q)->mask & (index)) ++#define Q_IPSR(q) BIT((q)->qid) ++ ++/* ++ * Discover queue ring buffer hardware configuration, allocate in-memory ++ * ring buffer or use fixed I/O memory location, configure queue base register. ++ * Must be called before hardware queue is enabled. ++ * ++ * @queue - data structure, configured with RISCV_IOMMU_QUEUE_INIT() ++ * @entry_size - queue single element size in bytes. 
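
The size negotiation in riscv_iommu_queue_alloc() below relies on the LOG2SZ encoding, where a register value N means 2^(N + 1) entries. A small sketch of the arithmetic (illustrative only; the helper name is hypothetical):

static inline unsigned int queue_mask_from_logsz(unsigned int logsz)
{
	/* LOG2SZ = N encodes 2^(N + 1) entries; the ring mask is size - 1. */
	return (2U << logsz) - 1;	/* e.g. logsz = 12 -> 8192 slots, mask 0x1fff */
}
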
++ */ ++static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu, ++ struct riscv_iommu_queue *queue, ++ size_t entry_size) ++{ ++ unsigned int logsz; ++ u64 qb, rb; ++ ++ /* ++ * Use WARL base register property to discover maximum allowed ++ * number of entries and optional fixed IO address for queue location. ++ */ ++ riscv_iommu_writeq(iommu, queue->qbr, RISCV_IOMMU_QUEUE_LOG2SZ_FIELD); ++ qb = riscv_iommu_readq(iommu, queue->qbr); ++ ++ /* ++ * Calculate and verify hardware supported queue length, as reported ++ * by the field LOG2SZ, where max queue length is equal to 2^(LOG2SZ + 1). ++ * Update queue size based on hardware supported value. ++ */ ++ logsz = ilog2(queue->mask); ++ if (logsz > FIELD_GET(RISCV_IOMMU_QUEUE_LOG2SZ_FIELD, qb)) ++ logsz = FIELD_GET(RISCV_IOMMU_QUEUE_LOG2SZ_FIELD, qb); ++ ++ /* ++ * Use WARL base register property to discover an optional fixed IO ++ * address for queue ring buffer location. Otherwise allocate contiguous ++ * system memory. ++ */ ++ if (FIELD_GET(RISCV_IOMMU_PPN_FIELD, qb)) { ++ const size_t queue_size = entry_size << (logsz + 1); ++ ++ queue->phys = pfn_to_phys(FIELD_GET(RISCV_IOMMU_PPN_FIELD, qb)); ++ queue->base = devm_ioremap(iommu->dev, queue->phys, queue_size); ++ } else { ++ do { ++ const size_t queue_size = entry_size << (logsz + 1); ++ const int order = get_order(queue_size); ++ ++ queue->base = riscv_iommu_get_pages(iommu, order); ++ queue->phys = __pa(queue->base); ++ } while (!queue->base && logsz-- > 0); ++ } ++ ++ if (!queue->base) ++ return -ENOMEM; ++ ++ qb = phys_to_ppn(queue->phys) | ++ FIELD_PREP(RISCV_IOMMU_QUEUE_LOG2SZ_FIELD, logsz); ++ ++ /* Update base register and read back to verify hw accepted our write */ ++ riscv_iommu_writeq(iommu, queue->qbr, qb); ++ rb = riscv_iommu_readq(iommu, queue->qbr); ++ if (rb != qb) { ++ dev_err(iommu->dev, "queue #%u allocation failed\n", queue->qid); ++ return -ENODEV; ++ } ++ ++ /* Update actual queue mask */ ++ queue->mask = (2U << logsz) - 1; ++ ++ dev_dbg(iommu->dev, "queue #%u allocated 2^%u entries", ++ queue->qid, logsz + 1); ++ ++ return 0; ++} ++ ++/* Check interrupt queue status, IPSR */ ++static irqreturn_t riscv_iommu_queue_ipsr(int irq, void *data) ++{ ++ struct riscv_iommu_queue *queue = (struct riscv_iommu_queue *)data; ++ ++ if (riscv_iommu_readl(queue->iommu, RISCV_IOMMU_REG_IPSR) & Q_IPSR(queue)) ++ return IRQ_WAKE_THREAD; ++ ++ return IRQ_NONE; ++} ++ ++static int riscv_iommu_queue_vec(struct riscv_iommu_device *iommu, int n) ++{ ++ /* Reuse ICVEC.CIV mask for all interrupt vectors mapping. */ ++ return (iommu->icvec >> (n * 4)) & RISCV_IOMMU_ICVEC_CIV; ++} ++ ++/* ++ * Enable queue processing in the hardware, register interrupt handler. ++ * ++ * @queue - data structure, already allocated with riscv_iommu_queue_alloc() ++ * @irq_handler - threaded interrupt handler. ++ */ ++static int riscv_iommu_queue_enable(struct riscv_iommu_device *iommu, ++ struct riscv_iommu_queue *queue, ++ irq_handler_t irq_handler) ++{ ++ const unsigned int irq = iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)]; ++ u32 csr; ++ int rc; ++ ++ if (queue->iommu) ++ return -EBUSY; ++ ++ /* Polling not implemented */ ++ if (!irq) ++ return -ENODEV; ++ ++ queue->iommu = iommu; ++ rc = request_threaded_irq(irq, riscv_iommu_queue_ipsr, irq_handler, ++ IRQF_ONESHOT | IRQF_SHARED, ++ dev_name(iommu->dev), queue); ++ if (rc) { ++ queue->iommu = NULL; ++ return rc; ++ } ++ ++ /* ++ * Enable queue with interrupts, clear any memory fault if any. 
++ * Wait for the hardware to acknowledge request and activate queue ++ * processing. ++ * Note: All CSR bitfields are in the same offsets for all queues. ++ */ ++ riscv_iommu_writel(iommu, queue->qcr, ++ RISCV_IOMMU_QUEUE_ENABLE | ++ RISCV_IOMMU_QUEUE_INTR_ENABLE | ++ RISCV_IOMMU_QUEUE_MEM_FAULT); ++ ++ riscv_iommu_readl_timeout(iommu, queue->qcr, ++ csr, !(csr & RISCV_IOMMU_QUEUE_BUSY), ++ 10, RISCV_IOMMU_QCSR_TIMEOUT); ++ ++ if (RISCV_IOMMU_QUEUE_ACTIVE != (csr & (RISCV_IOMMU_QUEUE_ACTIVE | ++ RISCV_IOMMU_QUEUE_BUSY | ++ RISCV_IOMMU_QUEUE_MEM_FAULT))) { ++ /* Best effort to stop and disable failing hardware queue. */ ++ riscv_iommu_writel(iommu, queue->qcr, 0); ++ free_irq(irq, queue); ++ queue->iommu = NULL; ++ dev_err(iommu->dev, "queue #%u failed to start\n", queue->qid); ++ return -EBUSY; ++ } ++ ++ /* Clear any pending interrupt flag. */ ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue)); ++ ++ return 0; ++} ++ ++/* ++ * Disable queue. Wait for the hardware to acknowledge request and ++ * stop processing enqueued requests. Report errors but continue. ++ */ ++static void riscv_iommu_queue_disable(struct riscv_iommu_queue *queue) ++{ ++ struct riscv_iommu_device *iommu = queue->iommu; ++ u32 csr; ++ ++ if (!iommu) ++ return; ++ ++ free_irq(iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)], queue); ++ riscv_iommu_writel(iommu, queue->qcr, 0); ++ riscv_iommu_readl_timeout(iommu, queue->qcr, ++ csr, !(csr & RISCV_IOMMU_QUEUE_BUSY), ++ 10, RISCV_IOMMU_QCSR_TIMEOUT); ++ ++ if (csr & (RISCV_IOMMU_QUEUE_ACTIVE | RISCV_IOMMU_QUEUE_BUSY)) ++ dev_err(iommu->dev, "fail to disable hardware queue #%u, csr 0x%x\n", ++ queue->qid, csr); ++ ++ queue->iommu = NULL; ++} ++ ++/* ++ * Returns number of available valid queue entries and the first item index. ++ * Update shadow producer index if necessary. ++ */ ++static int riscv_iommu_queue_consume(struct riscv_iommu_queue *queue, ++ unsigned int *index) ++{ ++ unsigned int head = atomic_read(&queue->head); ++ unsigned int tail = atomic_read(&queue->tail); ++ unsigned int last = Q_ITEM(queue, tail); ++ int available = (int)(tail - head); ++ ++ *index = head; ++ ++ if (available > 0) ++ return available; ++ ++ /* read hardware producer index, check reserved register bits are not set. */ ++ if (riscv_iommu_readl_timeout(queue->iommu, Q_TAIL(queue), ++ tail, (tail & ~queue->mask) == 0, ++ 0, RISCV_IOMMU_QUEUE_TIMEOUT)) { ++ dev_err_once(queue->iommu->dev, ++ "Hardware error: queue access timeout\n"); ++ return 0; ++ } ++ ++ if (tail == last) ++ return 0; ++ ++ /* update shadow producer index */ ++ return (int)(atomic_add_return((tail - last) & queue->mask, &queue->tail) - head); ++} ++ ++/* ++ * Release processed queue entries, should match riscv_iommu_queue_consume() calls. ++ */ ++static void riscv_iommu_queue_release(struct riscv_iommu_queue *queue, int count) ++{ ++ const unsigned int head = atomic_add_return(count, &queue->head); ++ ++ riscv_iommu_writel(queue->iommu, Q_HEAD(queue), Q_ITEM(queue, head)); ++} ++ ++/* Return actual consumer index based on hardware reported queue head index. 
*/ ++static unsigned int riscv_iommu_queue_cons(struct riscv_iommu_queue *queue) ++{ ++ const unsigned int cons = atomic_read(&queue->head); ++ const unsigned int last = Q_ITEM(queue, cons); ++ unsigned int head; ++ ++ if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head, ++ !(head & ~queue->mask), ++ 0, RISCV_IOMMU_QUEUE_TIMEOUT)) ++ return cons; ++ ++ return cons + ((head - last) & queue->mask); ++} ++ ++/* Wait for submitted item to be processed. */ ++static int riscv_iommu_queue_wait(struct riscv_iommu_queue *queue, ++ unsigned int index, ++ unsigned int timeout_us) ++{ ++ unsigned int cons = atomic_read(&queue->head); ++ ++ /* Already processed by the consumer */ ++ if ((int)(cons - index) > 0) ++ return 0; ++ ++ /* Monitor consumer index */ ++ return readx_poll_timeout(riscv_iommu_queue_cons, queue, cons, ++ (int)(cons - index) > 0, 0, timeout_us); ++} ++ ++/* Enqueue an entry and wait to be processed if timeout_us > 0 ++ * ++ * Error handling for IOMMU hardware not responding in reasonable time ++ * will be added as separate patch series along with other RAS features. ++ * For now, only report hardware failure and continue. ++ */ ++static unsigned int riscv_iommu_queue_send(struct riscv_iommu_queue *queue, ++ void *entry, size_t entry_size) ++{ ++ unsigned int prod; ++ unsigned int head; ++ unsigned int tail; ++ unsigned long flags; ++ ++ /* Do not preempt submission flow. */ ++ local_irq_save(flags); ++ ++ /* 1. Allocate some space in the queue */ ++ prod = atomic_inc_return(&queue->prod) - 1; ++ head = atomic_read(&queue->head); ++ ++ /* 2. Wait for space availability. */ ++ if ((prod - head) > queue->mask) { ++ if (readx_poll_timeout(atomic_read, &queue->head, ++ head, (prod - head) < queue->mask, ++ 0, RISCV_IOMMU_QUEUE_TIMEOUT)) ++ goto err_busy; ++ } else if ((prod - head) == queue->mask) { ++ const unsigned int last = Q_ITEM(queue, head); ++ ++ if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head, ++ !(head & ~queue->mask) && head != last, ++ 0, RISCV_IOMMU_QUEUE_TIMEOUT)) ++ goto err_busy; ++ atomic_add((head - last) & queue->mask, &queue->head); ++ } ++ ++ /* 3. Store entry in the ring buffer */ ++ memcpy(queue->base + Q_ITEM(queue, prod) * entry_size, entry, entry_size); ++ ++ /* 4. Wait for all previous entries to be ready */ ++ if (readx_poll_timeout(atomic_read, &queue->tail, tail, prod == tail, ++ 0, RISCV_IOMMU_QUEUE_TIMEOUT)) ++ goto err_busy; ++ ++ /* ++ * 5. Make sure the ring buffer update (whether in normal or I/O memory) is ++ * completed and visible before signaling the tail doorbell to fetch ++ * the next command. 'fence ow, ow' ++ */ ++ dma_wmb(); ++ riscv_iommu_writel(queue->iommu, Q_TAIL(queue), Q_ITEM(queue, prod + 1)); ++ ++ /* ++ * 6. Make sure the doorbell write to the device has finished before updating ++ * the shadow tail index in normal memory. 'fence o, w' ++ */ ++ mmiowb(); ++ atomic_inc(&queue->tail); ++ ++ /* 7. Complete submission and restore local interrupts */ ++ local_irq_restore(flags); ++ ++ return prod; ++ ++err_busy: ++ local_irq_restore(flags); ++ dev_err_once(queue->iommu->dev, "Hardware error: command enqueue failed\n"); ++ ++ return prod; ++} ++ ++/* ++ * IOMMU Command queue chapter 3.1 ++ */ ++ ++/* Command queue interrupt handler thread function */ ++static irqreturn_t riscv_iommu_cmdq_process(int irq, void *data) ++{ ++ const struct riscv_iommu_queue *queue = (struct riscv_iommu_queue *)data; ++ unsigned int ctrl; ++ ++ /* Clear MF/CQ errors, complete error recovery to be implemented. 
*/ ++ ctrl = riscv_iommu_readl(queue->iommu, queue->qcr); ++ if (ctrl & (RISCV_IOMMU_CQCSR_CQMF | RISCV_IOMMU_CQCSR_CMD_TO | ++ RISCV_IOMMU_CQCSR_CMD_ILL | RISCV_IOMMU_CQCSR_FENCE_W_IP)) { ++ riscv_iommu_writel(queue->iommu, queue->qcr, ctrl); ++ dev_warn(queue->iommu->dev, ++ "Queue #%u error; fault:%d timeout:%d illegal:%d fence_w_ip:%d\n", ++ queue->qid, ++ !!(ctrl & RISCV_IOMMU_CQCSR_CQMF), ++ !!(ctrl & RISCV_IOMMU_CQCSR_CMD_TO), ++ !!(ctrl & RISCV_IOMMU_CQCSR_CMD_ILL), ++ !!(ctrl & RISCV_IOMMU_CQCSR_FENCE_W_IP)); ++ } ++ ++ /* Placeholder for command queue interrupt notifiers */ ++ ++ /* Clear command interrupt pending. */ ++ riscv_iommu_writel(queue->iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue)); ++ ++ return IRQ_HANDLED; ++} ++ ++/* Send command to the IOMMU command queue */ ++static void riscv_iommu_cmd_send(struct riscv_iommu_device *iommu, ++ struct riscv_iommu_command *cmd) ++{ ++ riscv_iommu_queue_send(&iommu->cmdq, cmd, sizeof(*cmd)); ++} ++ ++/* Send IOFENCE.C command and wait for all scheduled commands to complete. */ ++static void riscv_iommu_cmd_sync(struct riscv_iommu_device *iommu, ++ unsigned int timeout_us) ++{ ++ struct riscv_iommu_command cmd; ++ unsigned int prod; ++ ++ riscv_iommu_cmd_iofence(&cmd); ++ prod = riscv_iommu_queue_send(&iommu->cmdq, &cmd, sizeof(cmd)); ++ ++ if (!timeout_us) ++ return; ++ ++ if (riscv_iommu_queue_wait(&iommu->cmdq, prod, timeout_us)) ++ dev_err_once(iommu->dev, ++ "Hardware error: command execution timeout\n"); ++} ++ ++/* ++ * IOMMU Fault/Event queue chapter 3.2 ++ */ ++ ++static void riscv_iommu_fault(struct riscv_iommu_device *iommu, ++ struct riscv_iommu_fq_record *event) ++{ ++ unsigned int err = FIELD_GET(RISCV_IOMMU_FQ_HDR_CAUSE, event->hdr); ++ unsigned int devid = FIELD_GET(RISCV_IOMMU_FQ_HDR_DID, event->hdr); ++ ++ /* Placeholder for future fault handling implementation, report only. */ ++ if (err) ++ dev_warn_ratelimited(iommu->dev, ++ "Fault %d devid: 0x%x iotval: %llx iotval2: %llx\n", ++ err, devid, event->iotval, event->iotval2); ++} ++ ++/* Fault queue interrupt handler thread function */ ++static irqreturn_t riscv_iommu_fltq_process(int irq, void *data) ++{ ++ struct riscv_iommu_queue *queue = (struct riscv_iommu_queue *)data; ++ struct riscv_iommu_device *iommu = queue->iommu; ++ struct riscv_iommu_fq_record *events; ++ unsigned int ctrl, idx; ++ int cnt, len; ++ ++ events = (struct riscv_iommu_fq_record *)queue->base; ++ ++ /* Clear fault interrupt pending and process all received fault events. */ ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue)); ++ ++ do { ++ cnt = riscv_iommu_queue_consume(queue, &idx); ++ for (len = 0; len < cnt; idx++, len++) ++ riscv_iommu_fault(iommu, &events[Q_ITEM(queue, idx)]); ++ riscv_iommu_queue_release(queue, cnt); ++ } while (cnt > 0); ++ ++ /* Clear MF/OF errors, complete error recovery to be implemented. */ ++ ctrl = riscv_iommu_readl(iommu, queue->qcr); ++ if (ctrl & (RISCV_IOMMU_FQCSR_FQMF | RISCV_IOMMU_FQCSR_FQOF)) { ++ riscv_iommu_writel(iommu, queue->qcr, ctrl); ++ dev_warn(iommu->dev, ++ "Queue #%u error; memory fault:%d overflow:%d\n", ++ queue->qid, ++ !!(ctrl & RISCV_IOMMU_FQCSR_FQMF), ++ !!(ctrl & RISCV_IOMMU_FQCSR_FQOF)); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++/* Lookup and initialize device context info structure. 
*/ ++static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iommu, ++ unsigned int devid) ++{ ++ const bool base_format = !(iommu->caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT); ++ unsigned int depth; ++ unsigned long ddt, old, new; ++ void *ptr; ++ u8 ddi_bits[3] = { 0 }; ++ u64 *ddtp = NULL; ++ ++ /* Make sure the mode is valid */ ++ if (iommu->ddt_mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL || ++ iommu->ddt_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL) ++ return NULL; ++ ++ /* ++ * Device id partitioning for base format: ++ * DDI[0]: bits 0 - 6 (1st level) (7 bits) ++ * DDI[1]: bits 7 - 15 (2nd level) (9 bits) ++ * DDI[2]: bits 16 - 23 (3rd level) (8 bits) ++ * ++ * For extended format: ++ * DDI[0]: bits 0 - 5 (1st level) (6 bits) ++ * DDI[1]: bits 6 - 14 (2nd level) (9 bits) ++ * DDI[2]: bits 15 - 23 (3rd level) (9 bits) ++ */ ++ if (base_format) { ++ ddi_bits[0] = 7; ++ ddi_bits[1] = 7 + 9; ++ ddi_bits[2] = 7 + 9 + 8; ++ } else { ++ ddi_bits[0] = 6; ++ ddi_bits[1] = 6 + 9; ++ ddi_bits[2] = 6 + 9 + 9; ++ } ++ ++ /* Make sure device id is within range */ ++ depth = iommu->ddt_mode - RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL; ++ if (devid >= (1 << ddi_bits[depth])) ++ return NULL; ++ ++ /* Get to the level of the non-leaf node that holds the device context */ ++ for (ddtp = iommu->ddt_root; depth-- > 0;) { ++ const int split = ddi_bits[depth]; ++ /* ++ * Each non-leaf node is 64bits wide and on each level ++ * nodes are indexed by DDI[depth]. ++ */ ++ ddtp += (devid >> split) & 0x1FF; ++ ++ /* ++ * Check if this node has been populated and if not ++ * allocate a new level and populate it. ++ */ ++ do { ++ ddt = READ_ONCE(*(unsigned long *)ddtp); ++ if (ddt & RISCV_IOMMU_DDTE_V) { ++ ddtp = __va(ppn_to_phys(ddt)); ++ break; ++ } ++ ++ ptr = riscv_iommu_get_pages(iommu, 0); ++ if (!ptr) ++ return NULL; ++ ++ new = phys_to_ppn(__pa(ptr)) | RISCV_IOMMU_DDTE_V; ++ old = cmpxchg_relaxed((unsigned long *)ddtp, ddt, new); ++ ++ if (old == ddt) { ++ ddtp = (u64 *)ptr; ++ break; ++ } ++ ++ /* Race setting DDT detected, re-read and retry. */ ++ riscv_iommu_free_pages(iommu, ptr); ++ } while (1); ++ } ++ ++ /* ++ * Grab the node that matches DDI[depth], note that when using base ++ * format the device context is 4 * 64bits, and the extended format ++ * is 8 * 64bits, hence the (3 - base_format) below. ++ */ ++ ddtp += (devid & ((64 << base_format) - 1)) << (3 - base_format); ++ ++ return (struct riscv_iommu_dc *)ddtp; ++} ++ ++/* ++ * This is best effort IOMMU translation shutdown flow. ++ * Disable IOMMU without waiting for hardware response. ++ */ ++static void riscv_iommu_disable(struct riscv_iommu_device *iommu) ++{ ++ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, 0); ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0); ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0); ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0); ++} ++ ++#define riscv_iommu_read_ddtp(iommu) ({ \ ++ u64 ddtp; \ ++ riscv_iommu_readq_timeout((iommu), RISCV_IOMMU_REG_DDTP, ddtp, \ ++ !(ddtp & RISCV_IOMMU_DDTP_BUSY), 10, \ ++ RISCV_IOMMU_DDTP_TIMEOUT); \ ++ ddtp; }) ++ ++static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu) ++{ ++ u64 ddtp; ++ unsigned int mode; ++ ++ ddtp = riscv_iommu_read_ddtp(iommu); ++ if (ddtp & RISCV_IOMMU_DDTP_BUSY) ++ return -EBUSY; ++ ++ /* ++ * It is optional for the hardware to report a fixed address for device ++ * directory root page when DDT.MODE is OFF or BARE. 
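
Referring back to the base-format device-id partitioning documented in riscv_iommu_get_dc() above, a worked split (sketch only; the helper name and the example device id 0x012345 are hypothetical):

static inline void ddi_split_base(unsigned int devid, unsigned int ddi[3])
{
	ddi[0] = devid & 0x7f;		/* bits 0-6,   0x012345 -> 0x45 */
	ddi[1] = (devid >> 7) & 0x1ff;	/* bits 7-15,  0x012345 -> 0x46 */
	ddi[2] = (devid >> 16) & 0xff;	/* bits 16-23, 0x012345 -> 0x01 */
}
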
++ */ ++ mode = FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp); ++ if (mode == RISCV_IOMMU_DDTP_IOMMU_MODE_BARE || ++ mode == RISCV_IOMMU_DDTP_IOMMU_MODE_OFF) { ++ /* Use WARL to discover hardware fixed DDT PPN */ ++ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, ++ FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE, mode)); ++ ddtp = riscv_iommu_read_ddtp(iommu); ++ if (ddtp & RISCV_IOMMU_DDTP_BUSY) ++ return -EBUSY; ++ ++ iommu->ddt_phys = ppn_to_phys(ddtp); ++ if (iommu->ddt_phys) ++ iommu->ddt_root = devm_ioremap(iommu->dev, ++ iommu->ddt_phys, PAGE_SIZE); ++ if (iommu->ddt_root) ++ memset(iommu->ddt_root, 0, PAGE_SIZE); ++ } ++ ++ if (!iommu->ddt_root) { ++ iommu->ddt_root = riscv_iommu_get_pages(iommu, 0); ++ iommu->ddt_phys = __pa(iommu->ddt_root); ++ } ++ ++ if (!iommu->ddt_root) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++/* ++ * Discover supported DDT modes starting from requested value, ++ * configure DDTP register with accepted mode and root DDT address. ++ * Accepted iommu->ddt_mode is updated on success. ++ */ ++static int riscv_iommu_iodir_set_mode(struct riscv_iommu_device *iommu, ++ unsigned int ddtp_mode) ++{ ++ struct device *dev = iommu->dev; ++ u64 ddtp, rq_ddtp; ++ unsigned int mode, rq_mode = ddtp_mode; ++ struct riscv_iommu_command cmd; ++ ++ ddtp = riscv_iommu_read_ddtp(iommu); ++ if (ddtp & RISCV_IOMMU_DDTP_BUSY) ++ return -EBUSY; ++ ++ /* Disallow state transition from xLVL to xLVL. */ ++ mode = FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp); ++ if (mode != RISCV_IOMMU_DDTP_IOMMU_MODE_BARE && ++ mode != RISCV_IOMMU_DDTP_IOMMU_MODE_OFF && ++ rq_mode != RISCV_IOMMU_DDTP_IOMMU_MODE_BARE && ++ rq_mode != RISCV_IOMMU_DDTP_IOMMU_MODE_OFF) ++ return -EINVAL; ++ ++ do { ++ rq_ddtp = FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE, rq_mode); ++ if (rq_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) ++ rq_ddtp |= phys_to_ppn(iommu->ddt_phys); ++ ++ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, rq_ddtp); ++ ddtp = riscv_iommu_read_ddtp(iommu); ++ if (ddtp & RISCV_IOMMU_DDTP_BUSY) { ++ dev_err(dev, "timeout when setting ddtp (ddt mode: %u, read: %llx)\n", ++ rq_mode, ddtp); ++ return -EBUSY; ++ } ++ ++ /* Verify IOMMU hardware accepts new DDTP config. */ ++ mode = FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp); ++ ++ if (rq_mode == mode) ++ break; ++ ++ /* Hardware mandatory DDTP mode has not been accepted. */ ++ if (rq_mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL && rq_ddtp != ddtp) { ++ dev_err(dev, "DDTP update failed hw: %llx vs %llx\n", ++ ddtp, rq_ddtp); ++ return -EINVAL; ++ } ++ ++ /* ++ * Mode field is WARL, an IOMMU may support a subset of ++ * directory table levels in which case if we tried to set ++ * an unsupported number of levels we'll readback either ++ * a valid xLVL or off/bare. If we got off/bare, try again ++ * with a smaller xLVL. ++ */ ++ if (mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL && ++ rq_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL) { ++ dev_dbg(dev, "DDTP hw mode %u vs %u\n", mode, rq_mode); ++ rq_mode--; ++ continue; ++ } ++ ++ /* ++ * We tried all supported modes and IOMMU hardware failed to ++ * accept new settings, something went very wrong since off/bare ++ * and at least one xLVL must be supported. 
++ */ ++ dev_err(dev, "DDTP hw mode %u, failed to set %u\n", ++ mode, ddtp_mode); ++ return -EINVAL; ++ } while (1); ++ ++ iommu->ddt_mode = mode; ++ if (mode != ddtp_mode) ++ dev_dbg(dev, "DDTP hw mode %u, requested %u\n", mode, ddtp_mode); ++ ++ /* Invalidate device context cache */ ++ riscv_iommu_cmd_iodir_inval_ddt(&cmd); ++ riscv_iommu_cmd_send(iommu, &cmd); ++ ++ /* Invalidate address translation cache */ ++ riscv_iommu_cmd_inval_vma(&cmd); ++ riscv_iommu_cmd_send(iommu, &cmd); ++ ++ /* IOFENCE.C */ ++ riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); ++ ++ return 0; ++} ++ ++/* This struct contains protection domain specific IOMMU driver data. */ ++struct riscv_iommu_domain { ++ struct iommu_domain domain; ++ struct list_head bonds; ++ spinlock_t lock; /* protect bonds list updates. */ ++ int pscid; ++ bool amo_enabled; ++ int numa_node; ++ unsigned int pgd_mode; ++ unsigned long *pgd_root; ++}; ++ ++#define iommu_domain_to_riscv(iommu_domain) \ ++ container_of(iommu_domain, struct riscv_iommu_domain, domain) ++ ++/* Private IOMMU data for managed devices, dev_iommu_priv_* */ ++struct riscv_iommu_info { ++ struct riscv_iommu_domain *domain; ++}; ++ ++/* ++ * Linkage between an iommu_domain and attached devices. ++ * ++ * Protection domain requiring IOATC and DevATC translation cache invalidations, ++ * should be linked to attached devices using a riscv_iommu_bond structure. ++ * Devices should be linked to the domain before first use and unlinked after ++ * the translations from the referenced protection domain can no longer be used. ++ * Blocking and identity domains are not tracked here, as the IOMMU hardware ++ * does not cache negative and/or identity (BARE mode) translations, and DevATC ++ * is disabled for those protection domains. ++ * ++ * The device pointer and IOMMU data remain stable in the bond struct after ++ * _probe_device() where it's attached to the managed IOMMU, up to the ++ * completion of the _release_device() call. The release of the bond structure ++ * is synchronized with the device release. ++ */ ++struct riscv_iommu_bond { ++ struct list_head list; ++ struct rcu_head rcu; ++ struct device *dev; ++}; ++ ++static int riscv_iommu_bond_link(struct riscv_iommu_domain *domain, ++ struct device *dev) ++{ ++ struct riscv_iommu_device *iommu = dev_to_iommu(dev); ++ struct riscv_iommu_bond *bond; ++ struct list_head *bonds; ++ ++ bond = kzalloc(sizeof(*bond), GFP_KERNEL); ++ if (!bond) ++ return -ENOMEM; ++ bond->dev = dev; ++ ++ /* ++ * List of devices attached to the domain is arranged based on ++ * managed IOMMU device. ++ */ ++ ++ spin_lock(&domain->lock); ++ list_for_each(bonds, &domain->bonds) ++ if (dev_to_iommu(list_entry(bonds, struct riscv_iommu_bond, list)->dev) == iommu) ++ break; ++ list_add_rcu(&bond->list, bonds); ++ spin_unlock(&domain->lock); ++ ++ /* Synchronize with riscv_iommu_iotlb_inval() sequence. See comment below. 
*/ ++ smp_mb(); ++ ++ return 0; ++} ++ ++static void riscv_iommu_bond_unlink(struct riscv_iommu_domain *domain, ++ struct device *dev) ++{ ++ struct riscv_iommu_device *iommu = dev_to_iommu(dev); ++ struct riscv_iommu_bond *bond, *found = NULL; ++ struct riscv_iommu_command cmd; ++ int count = 0; ++ ++ if (!domain) ++ return; ++ ++ spin_lock(&domain->lock); ++ list_for_each_entry(bond, &domain->bonds, list) { ++ if (found && count) ++ break; ++ else if (bond->dev == dev) ++ found = bond; ++ else if (dev_to_iommu(bond->dev) == iommu) ++ count++; ++ } ++ if (found) ++ list_del_rcu(&found->list); ++ spin_unlock(&domain->lock); ++ kfree_rcu(found, rcu); ++ ++ /* ++ * If this was the last bond between this domain and the IOMMU ++ * invalidate all cached entries for domain's PSCID. ++ */ ++ if (!count) { ++ riscv_iommu_cmd_inval_vma(&cmd); ++ riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid); ++ riscv_iommu_cmd_send(iommu, &cmd); ++ ++ riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); ++ } ++} ++ ++/* ++ * Send IOTLB.INVAL for whole address space for ranges larger than 2MB. ++ * This limit will be replaced with range invalidations, if supported by ++ * the hardware, when RISC-V IOMMU architecture specification update for ++ * range invalidations update will be available. ++ */ ++#define RISCV_IOMMU_IOTLB_INVAL_LIMIT (2 << 20) ++ ++static void riscv_iommu_iotlb_inval(struct riscv_iommu_domain *domain, ++ unsigned long start, unsigned long end) ++{ ++ struct riscv_iommu_bond *bond; ++ struct riscv_iommu_device *iommu, *prev; ++ struct riscv_iommu_command cmd; ++ unsigned long len = end - start + 1; ++ unsigned long iova; ++ ++ /* ++ * For each IOMMU linked with this protection domain (via bonds->dev), ++ * an IOTLB invaliation command will be submitted and executed. ++ * ++ * Possbile race with domain attach flow is handled by sequencing ++ * bond creation - riscv_iommu_bond_link(), and device directory ++ * update - riscv_iommu_iodir_update(). ++ * ++ * PTE Update / IOTLB Inval Device attach & directory update ++ * -------------------------- -------------------------- ++ * update page table entries add dev to the bond list ++ * FENCE RW,RW FENCE RW,RW ++ * For all IOMMUs: (can be empty) Update FSC/PSCID ++ * FENCE IOW,IOW FENCE IOW,IOW ++ * IOTLB.INVAL IODIR.INVAL ++ * IOFENCE.C ++ * ++ * If bond list is not updated with new device, directory context will ++ * be configured with already valid page table content. If an IOMMU is ++ * linked to the protection domain it will receive invalidation ++ * requests for updated page table entries. ++ */ ++ smp_mb(); ++ ++ rcu_read_lock(); ++ ++ prev = NULL; ++ list_for_each_entry_rcu(bond, &domain->bonds, list) { ++ iommu = dev_to_iommu(bond->dev); ++ ++ /* ++ * IOTLB invalidation request can be safely omitted if already sent ++ * to the IOMMU for the same PSCID, and with domain->bonds list ++ * arranged based on the device's IOMMU, it's sufficient to check ++ * last device the invalidation was sent to. 
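++		 * For example (device names purely illustrative): with bonds
++		 * ordered as { devA:iommu0, devB:iommu0, devC:iommu1 }, devB is
++		 * skipped and only two invalidation batches are issued, one to
++		 * iommu0 and one to iommu1.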
++ */ ++ if (iommu == prev) ++ continue; ++ ++ riscv_iommu_cmd_inval_vma(&cmd); ++ riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid); ++ if (len && len < RISCV_IOMMU_IOTLB_INVAL_LIMIT) { ++ for (iova = start; iova < end; iova += PAGE_SIZE) { ++ riscv_iommu_cmd_inval_set_addr(&cmd, iova); ++ riscv_iommu_cmd_send(iommu, &cmd); ++ } ++ } else { ++ riscv_iommu_cmd_send(iommu, &cmd); ++ } ++ prev = iommu; ++ } ++ ++ prev = NULL; ++ list_for_each_entry_rcu(bond, &domain->bonds, list) { ++ iommu = dev_to_iommu(bond->dev); ++ if (iommu == prev) ++ continue; ++ ++ riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); ++ prev = iommu; ++ } ++ rcu_read_unlock(); ++} ++ ++#define RISCV_IOMMU_FSC_BARE 0 ++ ++/* ++ * Update IODIR for the device. ++ * ++ * During the execution of riscv_iommu_probe_device(), IODIR entries are ++ * allocated for the device's identifiers. Device context invalidation ++ * becomes necessary only if one of the updated entries was previously ++ * marked as valid, given that invalid device context entries are not ++ * cached by the IOMMU hardware. ++ * In this implementation, updating a valid device context while the ++ * device is not quiesced might be disruptive, potentially causing ++ * interim translation faults. ++ */ ++static void riscv_iommu_iodir_update(struct riscv_iommu_device *iommu, ++ struct device *dev, u64 fsc, u64 ta) ++{ ++ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); ++ struct riscv_iommu_dc *dc; ++ struct riscv_iommu_command cmd; ++ bool sync_required = false; ++ u64 tc; ++ int i; ++ ++ for (i = 0; i < fwspec->num_ids; i++) { ++ dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); ++ tc = READ_ONCE(dc->tc); ++ if (!(tc & RISCV_IOMMU_DC_TC_V)) ++ continue; ++ ++ WRITE_ONCE(dc->tc, tc & ~RISCV_IOMMU_DC_TC_V); ++ ++ /* Invalidate device context cached values */ ++ riscv_iommu_cmd_iodir_inval_ddt(&cmd); ++ riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]); ++ riscv_iommu_cmd_send(iommu, &cmd); ++ sync_required = true; ++ } ++ ++ if (sync_required) ++ riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); ++ ++ /* ++ * For device context with DC_TC_PDTV = 0, translation attributes valid bit ++ * is stored as DC_TC_V bit (both sharing the same location at BIT(0)). ++ */ ++ for (i = 0; i < fwspec->num_ids; i++) { ++ dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); ++ tc = READ_ONCE(dc->tc); ++ tc |= ta & RISCV_IOMMU_DC_TC_V; ++ ++ WRITE_ONCE(dc->fsc, fsc); ++ WRITE_ONCE(dc->ta, ta & RISCV_IOMMU_PC_TA_PSCID); ++ /* Update device context, write TC.V as the last step. */ ++ dma_wmb(); ++ WRITE_ONCE(dc->tc, tc); ++ ++ /* Invalidate device context after update */ ++ riscv_iommu_cmd_iodir_inval_ddt(&cmd); ++ riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]); ++ riscv_iommu_cmd_send(iommu, &cmd); ++ } ++ ++ riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT); ++} ++ ++/* ++ * IOVA page translation tree management. 
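++ *
++ * The tree mirrors the CPU's Sv39/Sv48/Sv57 first-stage format: assuming
++ * 4K pages, PT_SHIFT = PAGE_SHIFT - ilog2(sizeof(pte_t)) = 12 - 3 = 9,
++ * so each level indexes 512 entries using 9 bits of the IOVA.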
++ */ ++ ++static void riscv_iommu_iotlb_flush_all(struct iommu_domain *iommu_domain) ++{ ++ struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); ++ ++ riscv_iommu_iotlb_inval(domain, 0, ULONG_MAX); ++} ++ ++static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain, ++ struct iommu_iotlb_gather *gather) ++{ ++ struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); ++ ++ riscv_iommu_iotlb_inval(domain, gather->start, gather->end); ++} ++ ++#define PT_SHIFT (PAGE_SHIFT - ilog2(sizeof(pte_t))) ++ ++#define _io_pte_present(pte) ((pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) ++#define _io_pte_leaf(pte) ((pte) & _PAGE_LEAF) ++#define _io_pte_none(pte) ((pte) == 0) ++#define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot)) ++ ++static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain, ++ unsigned long pte, struct list_head *freelist) ++{ ++ unsigned long *ptr; ++ int i; ++ ++ if (!_io_pte_present(pte) || _io_pte_leaf(pte)) ++ return; ++ ++ ptr = (unsigned long *)pfn_to_virt(__page_val_to_pfn(pte)); ++ ++ /* Recursively free all sub page table pages */ ++ for (i = 0; i < PTRS_PER_PTE; i++) { ++ pte = READ_ONCE(ptr[i]); ++ if (!_io_pte_none(pte) && cmpxchg_relaxed(ptr + i, pte, 0) == pte) ++ riscv_iommu_pte_free(domain, pte, freelist); ++ } ++ ++ if (freelist) ++ list_add_tail(&virt_to_page(ptr)->lru, freelist); ++ else ++ iommu_free_page(ptr); ++} ++ ++static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain, ++ unsigned long iova, size_t pgsize, ++ gfp_t gfp) ++{ ++ unsigned long *ptr = domain->pgd_root; ++ unsigned long pte, old; ++ int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2; ++ void *addr; ++ ++ do { ++ const int shift = PAGE_SHIFT + PT_SHIFT * level; ++ ++ ptr += ((iova >> shift) & (PTRS_PER_PTE - 1)); ++ /* ++ * Note: returned entry might be a non-leaf if there was ++ * existing mapping with smaller granularity. Up to the caller ++ * to replace and invalidate. ++ */ ++ if (((size_t)1 << shift) == pgsize) ++ return ptr; ++pte_retry: ++ pte = READ_ONCE(*ptr); ++ /* ++ * This is very likely incorrect as we should not be adding ++ * new mapping with smaller granularity on top ++ * of existing 2M/1G mapping. Fail. ++ */ ++ if (_io_pte_present(pte) && _io_pte_leaf(pte)) ++ return NULL; ++ /* ++ * Non-leaf entry is missing, allocate and try to add to the ++ * page table. This might race with other mappings, retry. 
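++		 * The new table page is published with cmpxchg_relaxed(); if
++		 * another CPU installed an entry first, the freshly allocated
++		 * page is freed and the lookup restarts from pte_retry.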
++ */ ++ if (_io_pte_none(pte)) { ++ addr = iommu_alloc_page_node(domain->numa_node, gfp); ++ if (!addr) ++ return NULL; ++ old = pte; ++ pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE); ++ if (cmpxchg_relaxed(ptr, old, pte) != old) { ++ iommu_free_page(addr); ++ goto pte_retry; ++ } ++ } ++ ptr = (unsigned long *)pfn_to_virt(__page_val_to_pfn(pte)); ++ } while (level-- > 0); ++ ++ return NULL; ++} ++ ++static unsigned long *riscv_iommu_pte_fetch(struct riscv_iommu_domain *domain, ++ unsigned long iova, size_t *pte_pgsize) ++{ ++ unsigned long *ptr = domain->pgd_root; ++ unsigned long pte; ++ int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2; ++ ++ do { ++ const int shift = PAGE_SHIFT + PT_SHIFT * level; ++ ++ ptr += ((iova >> shift) & (PTRS_PER_PTE - 1)); ++ pte = READ_ONCE(*ptr); ++ if (_io_pte_present(pte) && _io_pte_leaf(pte)) { ++ *pte_pgsize = (size_t)1 << shift; ++ return ptr; ++ } ++ if (_io_pte_none(pte)) ++ return NULL; ++ ptr = (unsigned long *)pfn_to_virt(__page_val_to_pfn(pte)); ++ } while (level-- > 0); ++ ++ return NULL; ++} ++ ++static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain, ++ unsigned long iova, phys_addr_t phys, ++ size_t pgsize, size_t pgcount, int prot, ++ gfp_t gfp, size_t *mapped) ++{ ++ struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); ++ size_t size = 0; ++ unsigned long *ptr; ++ unsigned long pte, old, pte_prot; ++ int rc = 0; ++ LIST_HEAD(freelist); ++ ++ if (!(prot & IOMMU_WRITE)) ++ pte_prot = _PAGE_BASE | _PAGE_READ; ++ else if (domain->amo_enabled) ++ pte_prot = _PAGE_BASE | _PAGE_READ | _PAGE_WRITE; ++ else ++ pte_prot = _PAGE_BASE | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY; ++ ++ while (pgcount) { ++ ptr = riscv_iommu_pte_alloc(domain, iova, pgsize, gfp); ++ if (!ptr) { ++ rc = -ENOMEM; ++ break; ++ } ++ ++ old = READ_ONCE(*ptr); ++ pte = _io_pte_entry(phys_to_pfn(phys), pte_prot); ++ if (cmpxchg_relaxed(ptr, old, pte) != old) ++ continue; ++ ++ riscv_iommu_pte_free(domain, old, &freelist); ++ ++ size += pgsize; ++ iova += pgsize; ++ phys += pgsize; ++ --pgcount; ++ } ++ ++ *mapped = size; ++ ++ if (!list_empty(&freelist)) { ++ /* ++ * In 1.0 spec version, the smallest scope we can use to ++ * invalidate all levels of page table (i.e. leaf and non-leaf) ++ * is an invalidate-all-PSCID IOTINVAL.VMA with AV=0. ++ * This will be updated with hardware support for ++ * capability.NL (non-leaf) IOTINVAL command. ++ */ ++ riscv_iommu_iotlb_inval(domain, 0, ULONG_MAX); ++ iommu_put_pages_list(&freelist); ++ } ++ ++ return rc; ++} ++ ++static size_t riscv_iommu_unmap_pages(struct iommu_domain *iommu_domain, ++ unsigned long iova, size_t pgsize, ++ size_t pgcount, ++ struct iommu_iotlb_gather *gather) ++{ ++ struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); ++ size_t size = pgcount << __ffs(pgsize); ++ unsigned long *ptr, old; ++ size_t unmapped = 0; ++ size_t pte_size; ++ ++ while (unmapped < size) { ++ ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); ++ if (!ptr) ++ return unmapped; ++ ++ /* partial unmap is not allowed, fail. 
*/ ++ if (iova & (pte_size - 1)) ++ return unmapped; ++ ++ old = READ_ONCE(*ptr); ++ if (cmpxchg_relaxed(ptr, old, 0) != old) ++ continue; ++ ++ iommu_iotlb_gather_add_page(&domain->domain, gather, iova, ++ pte_size); ++ ++ iova += pte_size; ++ unmapped += pte_size; ++ } ++ ++ return unmapped; ++} ++ ++static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain, ++ dma_addr_t iova) ++{ ++ struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); ++ unsigned long pte_size; ++ unsigned long *ptr; ++ ++ ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); ++ if (_io_pte_none(*ptr) || !_io_pte_present(*ptr)) ++ return 0; ++ ++ return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1)); ++} ++ ++static void riscv_iommu_free_paging_domain(struct iommu_domain *iommu_domain) ++{ ++ struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); ++ const unsigned long pfn = virt_to_pfn(domain->pgd_root); ++ ++ WARN_ON(!list_empty(&domain->bonds)); ++ ++ if ((int)domain->pscid > 0) ++ ida_free(&riscv_iommu_pscids, domain->pscid); ++ ++ riscv_iommu_pte_free(domain, _io_pte_entry(pfn, _PAGE_TABLE), NULL); ++ kfree(domain); ++} ++ ++static bool riscv_iommu_pt_supported(struct riscv_iommu_device *iommu, int pgd_mode) ++{ ++ switch (pgd_mode) { ++ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39: ++ return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39; ++ ++ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48: ++ return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48; ++ ++ case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57: ++ return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57; ++ } ++ return false; ++} ++ ++static int riscv_iommu_attach_paging_domain(struct iommu_domain *iommu_domain, ++ struct device *dev) ++{ ++ struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); ++ struct riscv_iommu_device *iommu = dev_to_iommu(dev); ++ struct riscv_iommu_info *info = dev_iommu_priv_get(dev); ++ u64 fsc, ta; ++ ++ if (!riscv_iommu_pt_supported(iommu, domain->pgd_mode)) ++ return -ENODEV; ++ ++ fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) | ++ FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root)); ++ ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) | ++ RISCV_IOMMU_PC_TA_V; ++ ++ if (riscv_iommu_bond_link(domain, dev)) ++ return -ENOMEM; ++ ++ riscv_iommu_iodir_update(iommu, dev, fsc, ta); ++ riscv_iommu_bond_unlink(info->domain, dev); ++ info->domain = domain; ++ ++ return 0; ++} ++ ++static const struct iommu_domain_ops riscv_iommu_paging_domain_ops = { ++ .attach_dev = riscv_iommu_attach_paging_domain, ++ .free = riscv_iommu_free_paging_domain, ++ .map_pages = riscv_iommu_map_pages, ++ .unmap_pages = riscv_iommu_unmap_pages, ++ .iova_to_phys = riscv_iommu_iova_to_phys, ++ .iotlb_sync = riscv_iommu_iotlb_sync, ++ .flush_iotlb_all = riscv_iommu_iotlb_flush_all, ++}; ++ ++static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev) ++{ ++ struct riscv_iommu_domain *domain; ++ struct riscv_iommu_device *iommu; ++ unsigned int pgd_mode; ++ dma_addr_t va_mask; ++ int va_bits; ++ ++ iommu = dev_to_iommu(dev); ++ if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57) { ++ pgd_mode = RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57; ++ va_bits = 57; ++ } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48) { ++ pgd_mode = RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48; ++ va_bits = 48; ++ } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39) { ++ pgd_mode = RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39; ++ va_bits = 39; ++ } else { ++ dev_err(dev, "cannot find 
supported page table mode\n"); ++ return ERR_PTR(-ENODEV); ++ } ++ ++ domain = kzalloc(sizeof(*domain), GFP_KERNEL); ++ if (!domain) ++ return ERR_PTR(-ENOMEM); ++ ++ INIT_LIST_HEAD_RCU(&domain->bonds); ++ spin_lock_init(&domain->lock); ++ domain->numa_node = dev_to_node(iommu->dev); ++ domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD); ++ domain->pgd_mode = pgd_mode; ++ domain->pgd_root = iommu_alloc_page_node(domain->numa_node, ++ GFP_KERNEL_ACCOUNT); ++ if (!domain->pgd_root) { ++ kfree(domain); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1, ++ RISCV_IOMMU_MAX_PSCID, GFP_KERNEL); ++ if (domain->pscid < 0) { ++ iommu_free_page(domain->pgd_root); ++ kfree(domain); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* ++ * Note: RISC-V Privilege spec mandates that virtual addresses ++ * need to be sign-extended, so if (VA_BITS - 1) is set, all ++ * bits >= VA_BITS need to also be set or else we'll get a ++ * page fault. However the code that creates the mappings ++ * above us (e.g. iommu_dma_alloc_iova()) won't do that for us ++ * for now, so we'll end up with invalid virtual addresses ++ * to map. As a workaround until we get this sorted out ++ * limit the available virtual addresses to VA_BITS - 1. ++ */ ++ va_mask = DMA_BIT_MASK(va_bits - 1); ++ ++ domain->domain.geometry.aperture_start = 0; ++ domain->domain.geometry.aperture_end = va_mask; ++ domain->domain.geometry.force_aperture = true; ++ domain->domain.pgsize_bitmap = va_mask & (SZ_4K | SZ_2M | SZ_1G | SZ_512G); ++ ++ domain->domain.ops = &riscv_iommu_paging_domain_ops; ++ ++ return &domain->domain; ++} ++ ++static int riscv_iommu_attach_blocking_domain(struct iommu_domain *iommu_domain, ++ struct device *dev) ++{ ++ struct riscv_iommu_device *iommu = dev_to_iommu(dev); ++ struct riscv_iommu_info *info = dev_iommu_priv_get(dev); ++ ++ /* Make device context invalid, translation requests will fault w/ #258 */ ++ riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, 0); ++ riscv_iommu_bond_unlink(info->domain, dev); ++ info->domain = NULL; ++ ++ return 0; ++} ++ ++static struct iommu_domain riscv_iommu_blocking_domain = { ++ .type = IOMMU_DOMAIN_BLOCKED, ++ .ops = &(const struct iommu_domain_ops) { ++ .attach_dev = riscv_iommu_attach_blocking_domain, ++ } ++}; ++ ++static int riscv_iommu_attach_identity_domain(struct iommu_domain *iommu_domain, ++ struct device *dev) ++{ ++ struct riscv_iommu_device *iommu = dev_to_iommu(dev); ++ struct riscv_iommu_info *info = dev_iommu_priv_get(dev); ++ ++ riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, RISCV_IOMMU_PC_TA_V); ++ riscv_iommu_bond_unlink(info->domain, dev); ++ info->domain = NULL; ++ ++ return 0; ++} ++ ++static struct iommu_domain riscv_iommu_identity_domain = { ++ .type = IOMMU_DOMAIN_IDENTITY, ++ .ops = &(const struct iommu_domain_ops) { ++ .attach_dev = riscv_iommu_attach_identity_domain, ++ } ++}; ++ ++static struct iommu_group *riscv_iommu_device_group(struct device *dev) ++{ ++ if (dev_is_pci(dev)) ++ return pci_device_group(dev); ++ return generic_device_group(dev); ++} ++ ++static int riscv_iommu_of_xlate(struct device *dev, const struct of_phandle_args *args) ++{ ++ return iommu_fwspec_add_ids(dev, args->args, 1); ++} ++ ++static struct iommu_device *riscv_iommu_probe_device(struct device *dev) ++{ ++ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); ++ struct riscv_iommu_device *iommu; ++ struct riscv_iommu_info *info; ++ struct riscv_iommu_dc *dc; ++ u64 tc; ++ int i; ++ ++ if (!fwspec || 
!fwspec->iommu_fwnode->dev || !fwspec->num_ids) ++ return ERR_PTR(-ENODEV); ++ ++ iommu = dev_get_drvdata(fwspec->iommu_fwnode->dev); ++ if (!iommu) ++ return ERR_PTR(-ENODEV); ++ ++ /* ++ * IOMMU hardware operating in fail-over BARE mode will provide ++ * identity translation for all connected devices anyway... ++ */ ++ if (iommu->ddt_mode <= RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) ++ return ERR_PTR(-ENODEV); ++ ++ info = kzalloc(sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return ERR_PTR(-ENOMEM); ++ /* ++ * Allocate and pre-configure device context entries in ++ * the device directory. Do not mark the context valid yet. ++ */ ++ tc = 0; ++ if (iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD) ++ tc |= RISCV_IOMMU_DC_TC_SADE; ++ for (i = 0; i < fwspec->num_ids; i++) { ++ dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); ++ if (!dc) { ++ kfree(info); ++ return ERR_PTR(-ENODEV); ++ } ++ if (READ_ONCE(dc->tc) & RISCV_IOMMU_DC_TC_V) ++ dev_warn(dev, "already attached to IOMMU device directory\n"); ++ WRITE_ONCE(dc->tc, tc); ++ } ++ ++ dev_iommu_priv_set(dev, info); ++ ++ return &iommu->iommu; ++} ++ ++static void riscv_iommu_release_device(struct device *dev) ++{ ++ struct riscv_iommu_info *info = dev_iommu_priv_get(dev); ++ ++ kfree_rcu_mightsleep(info); ++} ++ ++static const struct iommu_ops riscv_iommu_ops = { ++ .pgsize_bitmap = SZ_4K, ++ .of_xlate = riscv_iommu_of_xlate, ++ .identity_domain = &riscv_iommu_identity_domain, ++ .blocked_domain = &riscv_iommu_blocking_domain, ++ .release_domain = &riscv_iommu_blocking_domain, ++ .domain_alloc_paging = riscv_iommu_alloc_paging_domain, ++ .device_group = riscv_iommu_device_group, ++ .probe_device = riscv_iommu_probe_device, ++ .release_device = riscv_iommu_release_device, ++}; ++ ++static int riscv_iommu_init_check(struct riscv_iommu_device *iommu) ++{ ++ u64 ddtp; ++ ++ /* ++ * Make sure the IOMMU is switched off or in pass-through mode during ++ * regular boot flow and disable translation when we boot into a kexec ++ * kernel and the previous kernel left them enabled. ++ */ ++ ddtp = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_DDTP); ++ if (ddtp & RISCV_IOMMU_DDTP_BUSY) ++ return -EBUSY; ++ ++ if (FIELD_GET(RISCV_IOMMU_DDTP_IOMMU_MODE, ddtp) > ++ RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) { ++ if (!is_kdump_kernel()) ++ return -EBUSY; ++ riscv_iommu_disable(iommu); ++ } ++ ++ /* Configure accesses to in-memory data structures for CPU-native byte order. */ ++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) != ++ !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) { ++ if (!(iommu->caps & RISCV_IOMMU_CAPABILITIES_END)) ++ return -EINVAL; ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, ++ iommu->fctl ^ RISCV_IOMMU_FCTL_BE); ++ iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); ++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) != ++ !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) ++ return -EINVAL; ++ } ++ ++ /* ++ * Distribute interrupt vectors, always use first vector for CIV. ++ * At least one interrupt is required. Read back and verify. 
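++	 * For example, irqs_count == 4 yields CIV=0, FIV=1, PIV=2, PMIV=3,
++	 * while irqs_count == 2 folds back to CIV=0, FIV=1, PIV=0, PMIV=1
++	 * through the modulo arithmetic below.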
++ */ ++ if (!iommu->irqs_count) ++ return -EINVAL; ++ ++ iommu->icvec = FIELD_PREP(RISCV_IOMMU_ICVEC_FIV, 1 % iommu->irqs_count) | ++ FIELD_PREP(RISCV_IOMMU_ICVEC_PIV, 2 % iommu->irqs_count) | ++ FIELD_PREP(RISCV_IOMMU_ICVEC_PMIV, 3 % iommu->irqs_count); ++ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_ICVEC, iommu->icvec); ++ iommu->icvec = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_ICVEC); ++ if (max(max(FIELD_GET(RISCV_IOMMU_ICVEC_CIV, iommu->icvec), ++ FIELD_GET(RISCV_IOMMU_ICVEC_FIV, iommu->icvec)), ++ max(FIELD_GET(RISCV_IOMMU_ICVEC_PIV, iommu->icvec), ++ FIELD_GET(RISCV_IOMMU_ICVEC_PMIV, iommu->icvec))) >= iommu->irqs_count) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++void riscv_iommu_remove(struct riscv_iommu_device *iommu) ++{ ++ iommu_device_unregister(&iommu->iommu); ++ iommu_device_sysfs_remove(&iommu->iommu); ++ riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF); ++ riscv_iommu_queue_disable(&iommu->cmdq); ++ riscv_iommu_queue_disable(&iommu->fltq); ++} ++ ++int riscv_iommu_init(struct riscv_iommu_device *iommu) ++{ ++ int rc; ++ ++ RISCV_IOMMU_QUEUE_INIT(&iommu->cmdq, CQ); ++ RISCV_IOMMU_QUEUE_INIT(&iommu->fltq, FQ); ++ ++ rc = riscv_iommu_init_check(iommu); ++ if (rc) ++ return dev_err_probe(iommu->dev, rc, "unexpected device state\n"); ++ ++ rc = riscv_iommu_iodir_alloc(iommu); ++ if (rc) ++ return rc; ++ ++ rc = riscv_iommu_queue_alloc(iommu, &iommu->cmdq, ++ sizeof(struct riscv_iommu_command)); ++ if (rc) ++ return rc; ++ ++ rc = riscv_iommu_queue_alloc(iommu, &iommu->fltq, ++ sizeof(struct riscv_iommu_fq_record)); ++ if (rc) ++ return rc; ++ ++ rc = riscv_iommu_queue_enable(iommu, &iommu->cmdq, riscv_iommu_cmdq_process); ++ if (rc) ++ return rc; ++ ++ rc = riscv_iommu_queue_enable(iommu, &iommu->fltq, riscv_iommu_fltq_process); ++ if (rc) ++ goto err_queue_disable; ++ ++ rc = riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_MAX); ++ if (rc) ++ goto err_queue_disable; ++ ++ rc = iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "riscv-iommu@%s", ++ dev_name(iommu->dev)); ++ if (rc) { ++ dev_err_probe(iommu->dev, rc, "cannot register sysfs interface\n"); ++ goto err_iodir_off; ++ } ++ ++ rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev); ++ if (rc) { ++ dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n"); ++ goto err_remove_sysfs; ++ } ++ ++ return 0; ++ ++err_remove_sysfs: ++ iommu_device_sysfs_remove(&iommu->iommu); ++err_iodir_off: ++ riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF); ++err_queue_disable: ++ riscv_iommu_queue_disable(&iommu->fltq); ++ riscv_iommu_queue_disable(&iommu->cmdq); ++ return rc; ++} +diff --git a/drivers/iommu/riscv/iommu.h b/drivers/iommu/riscv/iommu.h +new file mode 100644 +index 000000000000..b1c4664542b4 +--- /dev/null ++++ b/drivers/iommu/riscv/iommu.h +@@ -0,0 +1,88 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright © 2022-2024 Rivos Inc. 
++ * Copyright © 2023 FORTH-ICS/CARV ++ * ++ * Authors ++ * Tomasz Jeznach ++ * Nick Kossifidis ++ */ ++ ++#ifndef _RISCV_IOMMU_H_ ++#define _RISCV_IOMMU_H_ ++ ++#include ++#include ++#include ++ ++#include "iommu-bits.h" ++ ++struct riscv_iommu_device; ++ ++struct riscv_iommu_queue { ++ atomic_t prod; /* unbounded producer allocation index */ ++ atomic_t head; /* unbounded shadow ring buffer consumer index */ ++ atomic_t tail; /* unbounded shadow ring buffer producer index */ ++ unsigned int mask; /* index mask, queue length - 1 */ ++ unsigned int irq; /* allocated interrupt number */ ++ struct riscv_iommu_device *iommu; /* iommu device handling the queue when active */ ++ void *base; /* ring buffer kernel pointer */ ++ dma_addr_t phys; /* ring buffer physical address */ ++ u16 qbr; /* base register offset, head and tail reference */ ++ u16 qcr; /* control and status register offset */ ++ u8 qid; /* queue identifier, same as RISCV_IOMMU_INTR_XX */ ++}; ++ ++struct riscv_iommu_device { ++ /* iommu core interface */ ++ struct iommu_device iommu; ++ ++ /* iommu hardware */ ++ struct device *dev; ++ ++ /* hardware control register space */ ++ void __iomem *reg; ++ ++ /* supported and enabled hardware capabilities */ ++ u64 caps; ++ u32 fctl; ++ ++ /* available interrupt numbers, MSI or WSI */ ++ unsigned int irqs[RISCV_IOMMU_INTR_COUNT]; ++ unsigned int irqs_count; ++ unsigned int icvec; ++ ++ /* hardware queues */ ++ struct riscv_iommu_queue cmdq; ++ struct riscv_iommu_queue fltq; ++ ++ /* device directory */ ++ unsigned int ddt_mode; ++ dma_addr_t ddt_phys; ++ u64 *ddt_root; ++}; ++ ++int riscv_iommu_init(struct riscv_iommu_device *iommu); ++void riscv_iommu_remove(struct riscv_iommu_device *iommu); ++ ++#define riscv_iommu_readl(iommu, addr) \ ++ readl_relaxed((iommu)->reg + (addr)) ++ ++#define riscv_iommu_readq(iommu, addr) \ ++ readq_relaxed((iommu)->reg + (addr)) ++ ++#define riscv_iommu_writel(iommu, addr, val) \ ++ writel_relaxed((val), (iommu)->reg + (addr)) ++ ++#define riscv_iommu_writeq(iommu, addr, val) \ ++ writeq_relaxed((val), (iommu)->reg + (addr)) ++ ++#define riscv_iommu_readq_timeout(iommu, addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readq_relaxed, (iommu)->reg + (addr), val, cond, \ ++ delay_us, timeout_us) ++ ++#define riscv_iommu_readl_timeout(iommu, addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readl_relaxed, (iommu)->reg + (addr), val, cond, \ ++ delay_us, timeout_us) ++ ++#endif +diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c +index 2685861c0a12..da79d9f4cf63 100644 +--- a/drivers/iommu/rockchip-iommu.c ++++ b/drivers/iommu/rockchip-iommu.c +@@ -1140,7 +1140,7 @@ static void rk_iommu_release_device(struct device *dev) + } + + static int rk_iommu_of_xlate(struct device *dev, +- struct of_phandle_args *args) ++ const struct of_phandle_args *args) + { + struct platform_device *iommu_dev; + struct rk_iommudata *data; +diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c +index 61b0b73b85bb..a2f4ffe6d949 100644 +--- a/drivers/iommu/sprd-iommu.c ++++ b/drivers/iommu/sprd-iommu.c +@@ -390,7 +390,8 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev) + return &sdev->iommu; + } + +-static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) ++static int sprd_iommu_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) + { + struct platform_device *pdev; + +diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c +index 
6954b0d5a8ea..9896b940df66 100644 +--- a/drivers/iommu/sun50i-iommu.c ++++ b/drivers/iommu/sun50i-iommu.c +@@ -820,7 +820,7 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev) + } + + static int sun50i_iommu_of_xlate(struct device *dev, +- struct of_phandle_args *args) ++ const struct of_phandle_args *args) + { + struct platform_device *iommu_pdev = of_find_device_by_node(args->np); + unsigned id = args->args[0]; +diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c +index 310871728ab4..14e525bd0d9b 100644 +--- a/drivers/iommu/tegra-smmu.c ++++ b/drivers/iommu/tegra-smmu.c +@@ -830,7 +830,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) + } + + static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev, +- struct of_phandle_args *args) ++ const struct of_phandle_args *args) + { + const struct iommu_ops *ops = smmu->iommu.ops; + int err; +@@ -959,7 +959,7 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev) + } + + static int tegra_smmu_of_xlate(struct device *dev, +- struct of_phandle_args *args) ++ const struct of_phandle_args *args) + { + struct platform_device *iommu_pdev = of_find_device_by_node(args->np); + struct tegra_mc *mc = platform_get_drvdata(iommu_pdev); +diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c +index 34db37fd9675..04048f64a2c0 100644 +--- a/drivers/iommu/virtio-iommu.c ++++ b/drivers/iommu/virtio-iommu.c +@@ -1051,7 +1051,8 @@ static struct iommu_group *viommu_device_group(struct device *dev) + return generic_device_group(dev); + } + +-static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args) ++static int viommu_of_xlate(struct device *dev, ++ const struct of_phandle_args *args) + { + return iommu_fwspec_add_ids(dev, args->args, 1); + } diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig -index 6edafab595e6..73546cc81c5a 100644 +index 6edafab595e6..bd40d887b9ee 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig -@@ -602,6 +602,31 @@ config RISCV_INTC +@@ -454,6 +454,14 @@ config LS_SCFG_MSI + config PARTITION_PERCPU + bool + ++config SOPHGO_SG2044_MSI ++ bool "Sophgo SG2044 MSI controller" ++ depends on ARCH_SOPHGO ++ help ++ Support for the Sophgo SG2044 MSI Controller. ++ This on-chip interrupt controller enables MSI sources to be ++ routed to the primary PLIC controller on SoC. ++ + config STM32_EXTI + bool + select IRQ_DOMAIN +@@ -602,12 +610,49 @@ config RISCV_INTC depends on RISCV select IRQ_DOMAIN_HIERARCHY @@ -350815,11 +381495,37 @@ index 6edafab595e6..73546cc81c5a 100644 config SIFIVE_PLIC bool depends on RISCV + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP + ++config THEAD_C900_ACLINT_SSWI ++ bool "THEAD C9XX ACLINT S-mode IPI Interrupt Controller" ++ depends on RISCV ++ depends on SMP ++ select IRQ_DOMAIN_HIERARCHY ++ select GENERIC_IRQ_IPI_MUX ++ help ++ This enables support for T-HEAD specific ACLINT SSWI device ++ support. ++ ++ If you don't know what to do here, say Y. 
++ + config EXYNOS_IRQ_COMBINER + bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST + depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile -index f4697c1a39c0..51b46cab7a2f 100644 +index f4697c1a39c0..f3c0245f38e6 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile -@@ -103,6 +103,9 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o +@@ -92,6 +92,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o + obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o + obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o + obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o ++obj-$(CONFIG_SOPHGO_SG2044_MSI) += irq-sg2044-msi.o + obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o + obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o + obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o +@@ -103,7 +104,11 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o @@ -350827,20 +381533,23 @@ index f4697c1a39c0..51b46cab7a2f 100644 +obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o +obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o ++obj-$(CONFIG_THEAD_C900_ACLINT_SSWI) += irq-thead-c900-aclint-sswi.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o + obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c new file mode 100644 -index 000000000000..4a3ffe856d6c +index 000000000000..7cd6b646774b --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-direct.c -@@ -0,0 +1,323 @@ +@@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + ++#include +#include +#include +#include @@ -351026,17 +381735,22 @@ index 000000000000..4a3ffe856d6c +} + +static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index, -+ u32 *parent_hwirq, unsigned long *parent_hartid) ++ u32 *parent_hwirq, unsigned long *parent_hartid, ++ struct aplic_priv *priv) +{ + struct of_phandle_args parent; ++ unsigned long hartid; + int rc; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. 
-+ */ -+ if (!is_of_node(dev->fwnode)) -+ return -EINVAL; ++ if (!is_of_node(dev->fwnode)) { ++ hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index); ++ if (hartid == INVALID_HARTID) ++ return -ENODEV; ++ ++ *parent_hartid = hartid; ++ *parent_hwirq = RV_IRQ_EXT; ++ return 0; ++ } + + rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent); + if (rc) @@ -351074,7 +381788,7 @@ index 000000000000..4a3ffe856d6c + /* Setup per-CPU IDC and target CPU mask */ + current_cpu = get_cpu(); + for (i = 0; i < priv->nr_idcs; i++) { -+ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid); ++ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv); + if (rc) { + dev_warn(dev, "parent irq for IDC%d not found\n", i); + continue; @@ -351160,18 +381874,20 @@ index 000000000000..4a3ffe856d6c +} diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c new file mode 100644 -index 000000000000..4ed7a1db7776 +index 000000000000..93e7c51f944a --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.c -@@ -0,0 +1,211 @@ +@@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + ++#include +#include +#include ++#include +#include +#include +#include @@ -351291,39 +382007,50 @@ index 000000000000..4ed7a1db7776 + writel(0, priv->regs + APLIC_DOMAINCFG); +} + ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id aplic_acpi_match[] = { ++ { "RSCV0002", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, aplic_acpi_match); ++ ++#endif ++ +int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs) +{ + struct device_node *np = to_of_node(dev->fwnode); + struct of_phandle_args parent; + int rc; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. -+ */ -+ if (!np) -+ return -EINVAL; -+ + /* Save device pointer and register base */ + priv->dev = dev; + priv->regs = regs; + -+ /* Find out number of interrupt sources */ -+ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs); -+ if (rc) { -+ dev_err(dev, "failed to get number of interrupt sources\n"); -+ return rc; -+ } ++ if (np) { ++ /* Find out number of interrupt sources */ ++ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs); ++ if (rc) { ++ dev_err(dev, "failed to get number of interrupt sources\n"); ++ return rc; ++ } + -+ /* -+ * Find out number of IDCs based on parent interrupts -+ * -+ * If "msi-parent" property is present then we ignore the -+ * APLIC IDCs which forces the APLIC driver to use MSI mode. -+ */ -+ if (!of_property_present(np, "msi-parent")) { -+ while (!of_irq_parse_one(np, priv->nr_idcs, &parent)) -+ priv->nr_idcs++; ++ /* ++ * Find out number of IDCs based on parent interrupts ++ * ++ * If "msi-parent" property is present then we ignore the ++ * APLIC IDCs which forces the APLIC driver to use MSI mode. 
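++		 * In practice nr_idcs ends up as the number of parent hart
++		 * contexts (typically the "interrupts-extended" entries) that
++		 * of_irq_parse_one() can resolve below.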
++ */ ++ if (!of_property_present(np, "msi-parent")) { ++ while (!of_irq_parse_one(np, priv->nr_idcs, &parent)) ++ priv->nr_idcs++; ++ } ++ } else { ++ rc = riscv_acpi_get_gsi_info(dev->fwnode, &priv->gsi_base, &priv->acpi_aplic_id, ++ &priv->nr_irqs, &priv->nr_idcs); ++ if (rc) { ++ dev_err(dev, "failed to find GSI mapping\n"); ++ return rc; ++ } + } + + /* Setup initial state APLIC interrupts */ @@ -351350,7 +382077,11 @@ index 000000000000..4ed7a1db7776 + * If msi-parent property is present then setup APLIC MSI + * mode otherwise setup APLIC direct mode. + */ -+ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); ++ if (is_of_node(dev->fwnode)) ++ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); ++ else ++ msi_mode = imsic_acpi_get_fwnode(NULL) ? 1 : 0; ++ + if (msi_mode) + rc = aplic_msi_setup(dev, regs); + else @@ -351359,6 +382090,11 @@ index 000000000000..4ed7a1db7776 + dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n", + msi_mode ? "MSI" : "direct"); + ++#ifdef CONFIG_ACPI ++ if (!acpi_disabled) ++ acpi_dev_clear_dependencies(ACPI_COMPANION(dev)); ++#endif ++ + return rc; +} + @@ -351371,16 +382107,17 @@ index 000000000000..4ed7a1db7776 + .driver = { + .name = "riscv-aplic", + .of_match_table = aplic_match, ++ .acpi_match_table = ACPI_PTR(aplic_acpi_match), + }, + .probe = aplic_probe, +}; +builtin_platform_driver(aplic_driver); diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h new file mode 100644 -index 000000000000..4393927d8c80 +index 000000000000..b0ad8cde69b1 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.h -@@ -0,0 +1,52 @@ +@@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -351411,6 +382148,7 @@ index 000000000000..4393927d8c80 + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; ++ u32 acpi_aplic_id; + void __iomem *regs; + struct aplic_msicfg msicfg; +}; @@ -351435,10 +382173,10 @@ index 000000000000..4393927d8c80 +#endif diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c new file mode 100644 -index 000000000000..c4a5b375a75f +index 000000000000..ad5c57c0ed06 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-msi.c -@@ -0,0 +1,278 @@ +@@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -351616,6 +382354,7 @@ index 000000000000..c4a5b375a75f +int aplic_msi_setup(struct device *dev, void __iomem *regs) +{ + const struct imsic_global_config *imsic_global; ++ struct irq_domain *msi_domain; + struct aplic_priv *priv; + struct aplic_msicfg *mc; + phys_addr_t pa; @@ -351698,8 +382437,14 @@ index 000000000000..c4a5b375a75f + * IMSIC and the IMSIC MSI domains are created later through + * the platform driver probing so we set it explicitly here. 
+ */ -+ if (is_of_node(dev->fwnode)) ++ if (is_of_node(dev->fwnode)) { + of_msi_configure(dev, to_of_node(dev->fwnode)); ++ } else { ++ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev), ++ DOMAIN_BUS_PLATFORM_MSI); ++ if (msi_domain) ++ dev_set_msi_domain(dev, msi_domain); ++ } + + if (!dev_get_msi_domain(dev)) + return -EPROBE_DEFER; @@ -351719,10 +382464,10 @@ index 000000000000..c4a5b375a75f +} diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c new file mode 100644 -index 000000000000..886418ec06cb +index 000000000000..d586c579713d --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-early.c -@@ -0,0 +1,201 @@ +@@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -351730,13 +382475,16 @@ index 000000000000..886418ec06cb + */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt ++#include +#include +#include +#include +#include +#include +#include ++#include +#include ++#include +#include +#include + @@ -351907,7 +382655,7 @@ index 000000000000..886418ec06cb + int rc; + + /* Setup IMSIC state */ -+ rc = imsic_setup_state(fwnode); ++ rc = imsic_setup_state(fwnode, NULL); + if (rc) { + pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc); + return rc; @@ -351924,12 +382672,71 @@ index 000000000000..886418ec06cb +} + +IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init); ++ ++#ifdef CONFIG_ACPI ++ ++static struct fwnode_handle *imsic_acpi_fwnode; ++ ++struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) ++{ ++ return imsic_acpi_fwnode; ++} ++ ++static int __init imsic_early_acpi_init(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header; ++ int rc; ++ ++ imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic"); ++ if (!imsic_acpi_fwnode) { ++ pr_err("unable to allocate IMSIC FW node\n"); ++ return -ENOMEM; ++ } ++ ++ /* Setup IMSIC state */ ++ rc = imsic_setup_state(imsic_acpi_fwnode, imsic); ++ if (rc) { ++ pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc); ++ return rc; ++ } ++ ++ /* Do early setup of IMSIC state and IPIs */ ++ rc = imsic_early_probe(imsic_acpi_fwnode); ++ if (rc) { ++ irq_domain_free_fwnode(imsic_acpi_fwnode); ++ imsic_acpi_fwnode = NULL; ++ return rc; ++ } ++ ++ rc = imsic_platform_acpi_probe(imsic_acpi_fwnode); ++ ++#ifdef CONFIG_PCI ++ if (!rc) ++ pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode); ++#endif ++ ++ if (rc) ++ pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n", ++ imsic_acpi_fwnode, rc); ++ ++ /* ++ * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can ++ * continue to work. So, no need to return failure. This is similar to ++ * DT where IPI works but MSI probe fails for some reason. ++ */ ++ return 0; ++} ++ ++IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL, ++ 1, imsic_early_acpi_init); ++#endif diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c new file mode 100644 -index 000000000000..c5ec66e0bfd3 +index 000000000000..c708780e8760 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-platform.c -@@ -0,0 +1,375 @@ +@@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
@@ -351937,6 +382744,7 @@ index 000000000000..c5ec66e0bfd3 + */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt ++#include +#include +#include +#include @@ -352280,18 +383088,37 @@ index 000000000000..c5ec66e0bfd3 + return 0; +} + -+static int imsic_platform_probe(struct platform_device *pdev) ++static int imsic_platform_probe_common(struct fwnode_handle *fwnode) +{ -+ struct device *dev = &pdev->dev; -+ -+ if (imsic && imsic->fwnode != dev->fwnode) { -+ dev_err(dev, "fwnode mismatch\n"); ++ if (imsic && imsic->fwnode != fwnode) { ++ pr_err("%pfwP: fwnode mismatch\n", fwnode); + return -ENODEV; + } + + return imsic_irqdomain_init(); +} + ++static int imsic_platform_dt_probe(struct platform_device *pdev) ++{ ++ return imsic_platform_probe_common(pdev->dev.fwnode); ++} ++ ++#ifdef CONFIG_ACPI ++ ++/* ++ * On ACPI based systems, PCI enumeration happens early during boot in ++ * acpi_scan_init(). PCI enumeration expects MSI domain setup before ++ * it calls pci_set_msi_domain(). Hence, unlike in DT where ++ * imsic-platform drive probe happens late during boot, ACPI based ++ * systems need to setup the MSI domain early. ++ */ ++int imsic_platform_acpi_probe(struct fwnode_handle *fwnode) ++{ ++ return imsic_platform_probe_common(fwnode); ++} ++ ++#endif ++ +static const struct of_device_id imsic_platform_match[] = { + { .compatible = "riscv,imsics" }, + {} @@ -352302,15 +383129,15 @@ index 000000000000..c5ec66e0bfd3 + .name = "riscv-imsic", + .of_match_table = imsic_platform_match, + }, -+ .probe = imsic_platform_probe, ++ .probe = imsic_platform_dt_probe, +}; +builtin_platform_driver(imsic_platform_driver); diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c new file mode 100644 -index 000000000000..5479f872e62b +index 000000000000..b97e6cd89ed7 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.c -@@ -0,0 +1,865 @@ +@@ -0,0 +1,891 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. @@ -352318,6 +383145,7 @@ index 000000000000..5479f872e62b + */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt ++#include +#include +#include +#include @@ -352823,18 +383651,90 @@ index 000000000000..5479f872e62b + return 0; +} + ++static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode, ++ struct imsic_global_config *global, ++ u32 *nr_parent_irqs) ++{ ++ int rc; ++ ++ /* Find number of guest index bits in MSI address */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", ++ &global->guest_index_bits); ++ if (rc) ++ global->guest_index_bits = 0; ++ ++ /* Find number of HART index bits */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", ++ &global->hart_index_bits); ++ if (rc) { ++ /* Assume default value */ ++ global->hart_index_bits = __fls(*nr_parent_irqs); ++ if (BIT(global->hart_index_bits) < *nr_parent_irqs) ++ global->hart_index_bits++; ++ } ++ ++ /* Find number of group index bits */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", ++ &global->group_index_bits); ++ if (rc) ++ global->group_index_bits = 0; ++ ++ /* ++ * Find first bit position of group index. ++ * If not specified assumed the default APLIC-IMSIC configuration. 
++ */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", ++ &global->group_index_shift); ++ if (rc) ++ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; ++ ++ /* Find number of interrupt identities */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", ++ &global->nr_ids); ++ if (rc) { ++ pr_err("%pfwP: number of interrupt identities not found\n", fwnode); ++ return rc; ++ } ++ ++ /* Find number of guest interrupt identities */ ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", ++ &global->nr_guest_ids); ++ if (rc) ++ global->nr_guest_ids = global->nr_ids; ++ ++ return 0; ++} ++ ++static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode, ++ struct imsic_global_config *global, ++ u32 *nr_parent_irqs, void *opaque) ++{ ++ struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque; ++ ++ global->guest_index_bits = imsic->guest_index_bits; ++ global->hart_index_bits = imsic->hart_index_bits; ++ global->group_index_bits = imsic->group_index_bits; ++ global->group_index_shift = imsic->group_index_shift; ++ global->nr_ids = imsic->num_ids; ++ global->nr_guest_ids = imsic->num_guest_ids; ++ return 0; ++} ++ +static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode, + u32 index, unsigned long *hartid) +{ + struct of_phandle_args parent; + int rc; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. -+ */ -+ if (!is_of_node(fwnode)) -+ return -EINVAL; ++ if (!is_of_node(fwnode)) { ++ if (hartid) ++ *hartid = acpi_rintc_index_to_hartid(index); ++ ++ if (!hartid || (*hartid == INVALID_HARTID)) ++ return -EINVAL; ++ ++ return 0; ++ } + + rc = of_irq_parse_one(to_of_node(fwnode), index, &parent); + if (rc) @@ -352853,12 +383753,8 @@ index 000000000000..5479f872e62b +static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode, + u32 index, struct resource *res) +{ -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. -+ */ + if (!is_of_node(fwnode)) -+ return -EINVAL; ++ return acpi_rintc_get_imsic_mmio_info(index, res); + + return of_address_to_resource(to_of_node(fwnode), index, res); +} @@ -352866,20 +383762,14 @@ index 000000000000..5479f872e62b +static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode, + struct imsic_global_config *global, + u32 *nr_parent_irqs, -+ u32 *nr_mmios) ++ u32 *nr_mmios, ++ void *opaque) +{ + unsigned long hartid; + struct resource res; + int rc; + u32 i; + -+ /* -+ * Currently, only OF fwnode is supported so extend this -+ * function for ACPI support. 
-+ */ -+ if (!is_of_node(fwnode)) -+ return -EINVAL; -+ + *nr_parent_irqs = 0; + *nr_mmios = 0; + @@ -352891,50 +383781,13 @@ index 000000000000..5479f872e62b + return -EINVAL; + } + -+ /* Find number of guest index bits in MSI address */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", -+ &global->guest_index_bits); -+ if (rc) -+ global->guest_index_bits = 0; -+ -+ /* Find number of HART index bits */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", -+ &global->hart_index_bits); -+ if (rc) { -+ /* Assume default value */ -+ global->hart_index_bits = __fls(*nr_parent_irqs); -+ if (BIT(global->hart_index_bits) < *nr_parent_irqs) -+ global->hart_index_bits++; -+ } -+ -+ /* Find number of group index bits */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", -+ &global->group_index_bits); -+ if (rc) -+ global->group_index_bits = 0; ++ if (is_of_node(fwnode)) ++ rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs); ++ else ++ rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque); + -+ /* -+ * Find first bit position of group index. -+ * If not specified assumed the default APLIC-IMSIC configuration. -+ */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", -+ &global->group_index_shift); + if (rc) -+ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; -+ -+ /* Find number of interrupt identities */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", -+ &global->nr_ids); -+ if (rc) { -+ pr_err("%pfwP: number of interrupt identities not found\n", fwnode); + return rc; -+ } -+ -+ /* Find number of guest interrupt identities */ -+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", -+ &global->nr_guest_ids); -+ if (rc) -+ global->nr_guest_ids = global->nr_ids; + + /* Sanity check guest index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT; @@ -353001,7 +383854,7 @@ index 000000000000..5479f872e62b + return 0; +} + -+int __init imsic_setup_state(struct fwnode_handle *fwnode) ++int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque) +{ + u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0; + struct imsic_global_config *global; @@ -353042,7 +383895,7 @@ index 000000000000..5479f872e62b + } + + /* Parse IMSIC fwnode */ -+ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios); ++ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque); + if (rc) + goto out_free_local; + @@ -353178,7 +384031,7 @@ index 000000000000..5479f872e62b +} diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h new file mode 100644 -index 000000000000..5ae2f69b035b +index 000000000000..391e44280827 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -0,0 +1,108 @@ @@ -353286,12 +384139,12 @@ index 000000000000..5ae2f69b035b + +void imsic_state_online(void); +void imsic_state_offline(void); -+int imsic_setup_state(struct fwnode_handle *fwnode); ++int imsic_setup_state(struct fwnode_handle *fwnode, void *opaque); +int imsic_irqdomain_init(void); + +#endif diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c -index 627beae9649a..4f3a12383a1e 100644 +index 627beae9649a..c9f6451e23b7 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -19,6 +19,8 @@ @@ -353392,6 +384245,1566 @@ index 627beae9649a..4f3a12383a1e 100644 return 0; } +@@ -229,14 +250,119 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", 
riscv_intc_init); + + #ifdef CONFIG_ACPI + ++struct rintc_data { ++ union { ++ u32 ext_intc_id; ++ struct { ++ u32 context_id : 16, ++ reserved : 8, ++ aplic_plic_id : 8; ++ }; ++ }; ++ unsigned long hart_id; ++ u64 imsic_addr; ++ u32 imsic_size; ++}; ++ ++static u32 nr_rintc; ++static struct rintc_data **rintc_acpi_data; ++ ++#define for_each_matching_plic(_plic_id) \ ++ unsigned int _plic; \ ++ \ ++ for (_plic = 0; _plic < nr_rintc; _plic++) \ ++ if (rintc_acpi_data[_plic]->aplic_plic_id != _plic_id) \ ++ continue; \ ++ else ++ ++unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) ++{ ++ unsigned int nctx = 0; ++ ++ for_each_matching_plic(plic_id) ++ nctx++; ++ ++ return nctx; ++} ++ ++static struct rintc_data *get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ unsigned int ctxt = 0; ++ ++ for_each_matching_plic(plic_id) { ++ if (ctxt == ctxt_idx) ++ return rintc_acpi_data[_plic]; ++ ++ ctxt++; ++ } ++ ++ return NULL; ++} ++ ++unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); ++ ++ return data ? data->hart_id : INVALID_HARTID; ++} ++ ++unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ struct rintc_data *data = get_plic_context(plic_id, ctxt_idx); ++ ++ return data ? data->context_id : INVALID_CONTEXT; ++} ++ ++unsigned long acpi_rintc_index_to_hartid(u32 index) ++{ ++ return index >= nr_rintc ? INVALID_HARTID : rintc_acpi_data[index]->hart_id; ++} ++ ++int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) ++{ ++ if (index >= nr_rintc) ++ return -1; ++ ++ res->start = rintc_acpi_data[index]->imsic_addr; ++ res->end = res->start + rintc_acpi_data[index]->imsic_size - 1; ++ res->flags = IORESOURCE_MEM; ++ return 0; ++} ++ ++static int __init riscv_intc_acpi_match(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ return 0; ++} ++ + static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end) + { + struct acpi_madt_rintc *rintc; + struct fwnode_handle *fn; ++ int count; + int rc; + ++ if (!rintc_acpi_data) { ++ count = acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, riscv_intc_acpi_match, 0); ++ if (count <= 0) ++ return -EINVAL; ++ ++ rintc_acpi_data = kcalloc(count, sizeof(*rintc_acpi_data), GFP_KERNEL); ++ if (!rintc_acpi_data) ++ return -ENOMEM; ++ } ++ + rintc = (struct acpi_madt_rintc *)header; ++ rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL); ++ if (!rintc_acpi_data[nr_rintc]) ++ return -ENOMEM; ++ ++ rintc_acpi_data[nr_rintc]->ext_intc_id = rintc->ext_intc_id; ++ rintc_acpi_data[nr_rintc]->hart_id = rintc->hart_id; ++ rintc_acpi_data[nr_rintc]->imsic_addr = rintc->imsic_addr; ++ rintc_acpi_data[nr_rintc]->imsic_size = rintc->imsic_size; ++ nr_rintc++; + + /* + * The ACPI MADT will have one INTC for each CPU (or HART) +@@ -256,6 +382,8 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, + rc = riscv_intc_init_common(fn, &riscv_intc_chip); + if (rc) + irq_domain_free_fwnode(fn); ++ else ++ acpi_set_irq_model(ACPI_IRQ_MODEL_RINTC, riscv_acpi_get_gsi_domain_id); + + return rc; + } +diff --git a/drivers/irqchip/irq-sg2044-msi.c b/drivers/irqchip/irq-sg2044-msi.c +new file mode 100644 +index 000000000000..176d66eb2615 +--- /dev/null ++++ b/drivers/irqchip/irq-sg2044-msi.c +@@ -0,0 +1,403 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Sophgo SG2044 MSI controller driver. 
++ * ++ * Copyright (C) 2025 Sophgo Tech Co., Ltd. ++ * http://www.sophgo.com ++ * ++ * Author: Lionel Li ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct irq_domain *sophgo_get_msi_irq_domain(void); ++ ++#define MAX_IRQ_NUMBER 512 ++ ++/* ++ * here we assume all plic hwirq and msi hwirq ++ * (for PCIe Interrupt Controller, pic) should be contiguous. ++ * msi hwirq is index of bitmap (both software and ++ * hardware), and starts from 0. ++ * so we use msi hwirq as index to get plic hwirq and its ++ * irq data. ++ * msi hwirq is written to Top reg for triggering irq ++ * by a PCIe device. ++ * now we pre-requested plic interrupt, but may try request ++ * plic interrupt when needed, like gicp_irq_domain_alloc. ++ */ ++struct sg2044_msi_data { ++ struct platform_device *pdev; ++ struct irq_domain *domain; ++ struct irq_chip *chip; ++ u32 num_irqs; /* The number of vectors for MSIs */ ++ unsigned long *msi_map; ++ int reg_bitwidth; ++ ++ spinlock_t lock; ++ ++ void __iomem *reg_set; ++ void __iomem *reg_clr; ++ ++ phys_addr_t reg_set_phys; ++ ++ irq_hw_number_t plic_hwirqs[MAX_IRQ_NUMBER]; ++ int plic_irqs[MAX_IRQ_NUMBER]; ++ int msi_to_plic[MAX_IRQ_NUMBER]; /* mapping from msi hwirq to plic hwirq */ ++ struct irq_data *plic_irq_datas[MAX_IRQ_NUMBER]; ++}; ++ ++// workaround for using in other modules ++struct sg2044_msi_data *msi_data; ++ ++struct irq_domain *sophgo_get_msi_irq_domain(void) ++{ ++ if (msi_data) ++ return msi_data->domain; ++ else ++ return NULL; ++} ++ ++static int sg2044_msi_domain_translate(struct irq_domain *d, ++ struct irq_fwspec *fwspec, ++ unsigned long *hwirq, ++ unsigned int *type) ++{ ++ struct sg2044_msi_data *data = d->host_data; ++ ++ if (fwspec->param_count != 2) ++ return -EINVAL; ++ if (fwspec->param[1] >= data->num_irqs) ++ return -EINVAL; ++ ++ *hwirq = fwspec->param[0]; ++ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; ++ pr_debug("%s hwirq %d, flag %d\n", __func__, fwspec->param[0], fwspec->param[1]); ++ return 0; ++} ++ ++static int sg2044_msi_domain_alloc(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs, ++ void *args) ++{ ++ unsigned long flags; ++ irq_hw_number_t hwirq; ++ int i, ret; ++ struct sg2044_msi_data *data = domain->host_data; ++ ++ /* dynamically alloc hwirq */ ++ spin_lock_irqsave(&data->lock, flags); ++ ret = bitmap_find_free_region(data->msi_map, data->num_irqs, ++ order_base_2(nr_irqs)); ++ spin_unlock_irqrestore(&data->lock, flags); ++ ++ if (ret < 0) { ++ pr_err("%s failed to alloc irq %d, total %d\n", __func__, virq, nr_irqs); ++ return -ENOSPC; ++ } ++ ++ hwirq = ret; ++ for (i = 0; i < nr_irqs; i++) { ++ irq_domain_set_info(domain, virq + i, hwirq + i, data->chip, ++ data, handle_edge_irq, NULL, NULL); ++ ++ data->msi_to_plic[hwirq + i] = data->plic_hwirqs[hwirq + i]; ++ } ++ ++ pr_debug("%s hwirq %ld, irq %d, plic irq %d, total %d\n", __func__, ++ hwirq, virq, data->plic_irqs[hwirq], nr_irqs); ++ return 0; ++} ++ ++static void sg2044_msi_domain_free(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs) ++{ ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq); ++ struct sg2044_msi_data *data = irq_data_get_irq_chip_data(d); ++ unsigned long flags; ++ ++ pr_debug("%s hwirq %ld, irq %d, total %d\n", __func__, d->hwirq, virq, nr_irqs); ++ ++ spin_lock_irqsave(&data->lock, flags); ++ bitmap_release_region(data->msi_map, d->hwirq, ++ order_base_2(nr_irqs)); ++ 
spin_unlock_irqrestore(&data->lock, flags); ++} ++ ++static const struct irq_domain_ops sg2044_msi_domain_ops = { ++ .translate = sg2044_msi_domain_translate, ++ .alloc = sg2044_msi_domain_alloc, ++ .free = sg2044_msi_domain_free, ++}; ++ ++static void sg2044_msi_ack_irq(struct irq_data *d) ++{ ++ struct sg2044_msi_data *data = irq_data_get_irq_chip_data(d); ++ int reg_off = 0; ++ struct irq_data *plic_irq_data = data->plic_irq_datas[d->hwirq]; ++ ++ reg_off = d->hwirq; ++ writel(0, (unsigned int *)data->reg_clr + reg_off); ++ ++ pr_debug("%s %ld, parent %s/%ld\n", __func__, d->hwirq, ++ plic_irq_data->domain->name, plic_irq_data->hwirq); ++ if (plic_irq_data->chip->irq_ack) ++ plic_irq_data->chip->irq_ack(plic_irq_data); ++} ++ ++static void sg2044_msi_mask_irq(struct irq_data *d) ++{ ++ struct sg2044_msi_data *data = irq_data_get_irq_chip_data(d); ++ struct irq_data *plic_irq_data = data->plic_irq_datas[d->hwirq]; ++ ++ pr_debug("%s %ld, parent %s/%ld\n", __func__, d->hwirq, ++ plic_irq_data->domain->name, plic_irq_data->hwirq); ++ plic_irq_data->chip->irq_mask(plic_irq_data); ++} ++ ++static void sg2044_msi_unmask_irq(struct irq_data *d) ++{ ++ struct sg2044_msi_data *data = irq_data_get_irq_chip_data(d); ++ struct irq_data *plic_irq_data = data->plic_irq_datas[d->hwirq]; ++ ++ pr_debug("%s %ld, parent %s/%ld\n", __func__, d->hwirq, ++ plic_irq_data->domain->name, plic_irq_data->hwirq); ++ plic_irq_data->chip->irq_unmask(plic_irq_data); ++} ++ ++static void sg2044_msi_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) ++{ ++ struct sg2044_msi_data *data = irq_data_get_irq_chip_data(d); ++ ++ msg->address_lo = lower_32_bits(data->reg_set_phys) + 4 * (d->hwirq / 32); ++ msg->address_hi = upper_32_bits(data->reg_set_phys); ++ msg->data = d->hwirq % 32; ++ ++ pr_debug("%s msi#%d: address_hi %#x, address_lo %#x, data %#x\n", __func__, ++ (int)d->hwirq, msg->address_hi, msg->address_lo, msg->data); ++} ++ ++static int sg2044_msi_set_affinity(struct irq_data *d, ++ const struct cpumask *mask, bool force) ++{ ++ struct sg2044_msi_data *data = irq_data_get_irq_chip_data(d); ++ struct irq_data *plic_irq_data = data->plic_irq_datas[d->hwirq]; ++ ++ irq_data_update_effective_affinity(d, mask); ++ if (plic_irq_data->chip->irq_set_affinity) ++ return plic_irq_data->chip->irq_set_affinity(plic_irq_data, mask, force); ++ else ++ return -EINVAL; ++} ++ ++static int sg2044_msi_set_type(struct irq_data *d, u32 type) ++{ ++ /* ++ * dummy function, so __irq_set_trigger can continue to set ++ * correct trigger type. 
++ */ ++ return 0; ++} ++ ++static struct irq_chip sg2044_msi_irq_chip = { ++ .name = "SG2044 MSI", ++ .irq_ack = sg2044_msi_ack_irq, ++ .irq_mask = sg2044_msi_mask_irq, ++ .irq_unmask = sg2044_msi_unmask_irq, ++ .irq_compose_msi_msg = sg2044_msi_setup_msi_msg, ++ .irq_set_affinity = sg2044_msi_set_affinity, ++ .irq_set_type = sg2044_msi_set_type, ++}; ++ ++static int sg2044_get_msi_hwirq(struct sg2044_msi_data *data, int plic_hwirq) ++{ ++ int i; ++ ++ for (i = 0; i < data->num_irqs; i++) { ++ if (data->msi_to_plic[i] == plic_hwirq) ++ break; ++ } ++ ++ return i; ++} ++ ++static void sg2044_msi_irq_handler(struct irq_desc *plic_desc) ++{ ++ struct irq_chip *plic_chip = irq_desc_get_chip(plic_desc); ++ struct sg2044_msi_data *data = irq_desc_get_handler_data(plic_desc); ++ irq_hw_number_t plic_hwirq = irq_desc_get_irq_data(plic_desc)->hwirq; ++ irq_hw_number_t sg2044_msi_hwirq; ++ int sg2044_msi_irq, ret; ++ ++ chained_irq_enter(plic_chip, plic_desc); ++ ++ ++ sg2044_msi_hwirq = sg2044_get_msi_hwirq(data, plic_hwirq); ++ if (sg2044_msi_hwirq < data->num_irqs) { ++ sg2044_msi_irq = irq_find_mapping(data->domain, sg2044_msi_hwirq); ++ pr_debug("%s plic hwirq %ld, msi hwirq %ld, msi irq %d\n", __func__, ++ plic_hwirq, sg2044_msi_hwirq, sg2044_msi_irq); ++ if (sg2044_msi_irq) ++ ret = generic_handle_irq(sg2044_msi_irq); ++ pr_debug("%s handled msi irq %d, %d\n", __func__, sg2044_msi_irq, ret); ++ } else { ++ pr_debug("%s not found msi hwirq for plic hwirq %ld\n", __func__, plic_hwirq); ++ /* ++ * workaround, ack unexpected(unregistered) interrupt ++ */ ++ writel(1 << (plic_hwirq - data->plic_hwirqs[0]), data->reg_clr); ++ } ++ ++ chained_irq_exit(plic_chip, plic_desc); ++} ++ ++static int sg2044_msi_probe(struct platform_device *pdev) ++{ ++ struct sg2044_msi_data *data; ++ struct resource *res; ++ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); ++ int ret, i; ++ ++ /* alloc private data */ ++ data = kzalloc(sizeof(struct sg2044_msi_data), GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ platform_set_drvdata(pdev, data); ++ data->pdev = pdev; ++ spin_lock_init(&data->lock); ++ ++ if (device_property_read_u32(&pdev->dev, "reg-bitwidth", &data->reg_bitwidth)) ++ data->reg_bitwidth = 32; ++ ++ if (device_property_read_u32(&pdev->dev, "sophgo,msi-num-vecs", &data->num_irqs)) ++ data->num_irqs = MAX_IRQ_NUMBER; ++ ++ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set"); ++ else if (ACPI_COMPANION(&pdev->dev)) ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ data->reg_set = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(data->reg_set)) { ++ dev_err(&pdev->dev, "failed map set register\n"); ++ ret = PTR_ERR(data->reg_set); ++ goto out; ++ } ++ ++ data->reg_set_phys = res->start; ++ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr"); ++ else if (ACPI_COMPANION(&pdev->dev)) ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ data->reg_clr = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(data->reg_clr)) { ++ dev_err(&pdev->dev, "failed map clear register\n"); ++ ret = PTR_ERR(data->reg_clr); ++ goto out; ++ } ++ ++ for (i = 0; i < data->num_irqs; i++) { ++ int irq; ++ ++ irq = platform_get_irq(pdev, i); ++ if (irq < 0) ++ break; ++ ++ data->plic_irqs[i] = irq; ++ data->plic_irq_datas[i] = irq_get_irq_data(irq); ++ if (!data->plic_irq_datas[i]) { ++ dev_err(&pdev->dev, "Invalid IRQ: %d\n", irq); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ 
data->plic_hwirqs[i] = data->plic_irq_datas[i]->hwirq; ++ dev_dbg(&pdev->dev, "msi%d: plic hwirq %ld, plic irq %d\n", i, ++ data->plic_hwirqs[i], data->plic_irqs[i]); ++ } ++ ++ dev_info(&pdev->dev, "map %d PLIC interrupts\n", data->num_irqs); ++ ++ data->msi_map = bitmap_zalloc(data->num_irqs, GFP_KERNEL); ++ if (!data->msi_map) ++ return -ENOMEM; ++ ++ /* create IRQ domain */ ++ data->domain = irq_domain_create_linear(fwnode, data->num_irqs, ++ &sg2044_msi_domain_ops, data); ++ if (!data->domain) { ++ dev_err(&pdev->dev, "create linear irq doamin failed\n"); ++ ret = -ENODEV; ++ goto out; ++ } ++ data->chip = &sg2044_msi_irq_chip; ++ ++ /* ++ * workaround to deal with IRQ conflict with TPU driver, ++ * skip the firt IRQ and mark it as used. ++ */ ++ for (i = 0; i < data->num_irqs; i++) ++ irq_set_chained_handler_and_data(data->plic_irqs[i], ++ sg2044_msi_irq_handler, data); ++ ++ irq_domain_update_bus_token(data->domain, DOMAIN_BUS_NEXUS); ++ if (msi_data) ++ dev_err(&pdev->dev, "msi_data is not empty, %s\n", ++ dev_name(&msi_data->pdev->dev)); ++ msi_data = data; ++ ++#ifdef CONFIG_ACPI ++ if (!acpi_disabled) ++ acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev)); ++#endif ++ ++ return 0; ++ ++out: ++ if (data->reg_set) ++ iounmap(data->reg_set); ++ if (data->reg_clr) ++ iounmap(data->reg_clr); ++ kfree(data); ++ return ret; ++} ++ ++static const struct of_device_id sg2044_msi_of_match[] = { ++ { .compatible = "sophgo,sg2044-msi", }, ++ {}, ++}; ++ ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id sg2044_msi_acpi_match[] = { ++ { "SOPH0001", 0 }, ++ {} ++}; ++#endif ++ ++static struct platform_driver sg2044_msi_driver = { ++ .driver = { ++ .name = "sg2044-msi", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(sg2044_msi_of_match), ++ .acpi_match_table = ACPI_PTR(sg2044_msi_acpi_match), ++ }, ++ .probe = sg2044_msi_probe, ++}; ++ ++static int __init sg2044_msi_init(void) ++{ ++ return platform_driver_register(&sg2044_msi_driver); ++} ++ ++arch_initcall(sg2044_msi_init); +diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c +index 572899669154..8744f09cc99c 100644 +--- a/drivers/irqchip/irq-sifive-plic.c ++++ b/drivers/irqchip/irq-sifive-plic.c +@@ -3,7 +3,8 @@ + * Copyright (C) 2017 SiFive + * Copyright (C) 2018 Christoph Hellwig + */ +-#define pr_fmt(fmt) "plic: " fmt ++#define pr_fmt(fmt) "riscv-plic: " fmt ++#include + #include + #include + #include +@@ -48,6 +49,8 @@ + #define CONTEXT_ENABLE_BASE 0x2000 + #define CONTEXT_ENABLE_SIZE 0x80 + ++#define PENDING_BASE 0x1000 ++ + /* + * Each hart context has a set of control registers associated with it. 
Right + * now there's only two: a source priority threshold over which the hart will +@@ -62,14 +65,18 @@ + #define PLIC_ENABLE_THRESHOLD 0 + + #define PLIC_QUIRK_EDGE_INTERRUPT 0 ++#define PLIC_QUIRK_CLAIM_REGISTER 1 + + struct plic_priv { ++ struct fwnode_handle *fwnode; + struct cpumask lmask; + struct irq_domain *irqdomain; + void __iomem *regs; + unsigned long plic_quirks; + unsigned int nr_irqs; + unsigned long *prio_save; ++ u32 gsi_base; ++ int acpi_plic_id; + }; + + struct plic_handler { +@@ -85,7 +92,7 @@ struct plic_handler { + struct plic_priv *priv; + }; + static int plic_parent_irq __ro_after_init; +-static bool plic_cpuhp_setup_done __ro_after_init; ++static bool plic_global_setup_done __ro_after_init; + static DEFINE_PER_CPU(struct plic_handler, plic_handlers); + + static int plic_irq_set_type(struct irq_data *d, unsigned int type); +@@ -103,9 +110,11 @@ static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable) + + static void plic_toggle(struct plic_handler *handler, int hwirq, int enable) + { +- raw_spin_lock(&handler->enable_lock); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&handler->enable_lock, flags); + __plic_toggle(handler->enable_base, hwirq, enable); +- raw_spin_unlock(&handler->enable_lock); ++ raw_spin_unlock_irqrestore(&handler->enable_lock, flags); + } + + static inline void plic_irq_toggle(const struct cpumask *mask, +@@ -163,15 +172,12 @@ static int plic_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) + { + unsigned int cpu; +- struct cpumask amask; + struct plic_priv *priv = irq_data_get_irq_chip_data(d); + +- cpumask_and(&amask, &priv->lmask, mask_val); +- + if (force) +- cpu = cpumask_first(&amask); ++ cpu = cpumask_first_and(&priv->lmask, mask_val); + else +- cpu = cpumask_any_and(&amask, cpu_online_mask); ++ cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask); + + if (cpu >= nr_cpu_ids) + return -EINVAL; +@@ -243,6 +249,7 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type) + static int plic_irq_suspend(void) + { + unsigned int i, cpu; ++ unsigned long flags; + u32 __iomem *reg; + struct plic_priv *priv; + +@@ -260,12 +267,12 @@ static int plic_irq_suspend(void) + if (!handler->present) + continue; + +- raw_spin_lock(&handler->enable_lock); ++ raw_spin_lock_irqsave(&handler->enable_lock, flags); + for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { + reg = handler->enable_base + i * sizeof(u32); + handler->enable_save[i] = readl(reg); + } +- raw_spin_unlock(&handler->enable_lock); ++ raw_spin_unlock_irqrestore(&handler->enable_lock, flags); + } + + return 0; +@@ -274,12 +281,13 @@ static int plic_irq_suspend(void) + static void plic_irq_resume(void) + { + unsigned int i, index, cpu; ++ unsigned long flags; + u32 __iomem *reg; + struct plic_priv *priv; + + priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; + +- for (i = 0; i < priv->nr_irqs; i++) { ++ for (i = 1; i < priv->nr_irqs; i++) { + index = BIT_WORD(i); + writel((priv->prio_save[index] & BIT_MASK(i)) ? 
1 : 0, + priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID); +@@ -291,12 +299,12 @@ static void plic_irq_resume(void) + if (!handler->present) + continue; + +- raw_spin_lock(&handler->enable_lock); ++ raw_spin_lock_irqsave(&handler->enable_lock, flags); + for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { + reg = handler->enable_base + i * sizeof(u32); + writel(handler->enable_save[i], reg); + } +- raw_spin_unlock(&handler->enable_lock); ++ raw_spin_unlock_irqrestore(&handler->enable_lock, flags); + } + } + +@@ -324,6 +332,10 @@ static int plic_irq_domain_translate(struct irq_domain *d, + { + struct plic_priv *priv = d->host_data; + ++ /* For DT, gsi_base is always zero. */ ++ if (fwspec->param[0] >= priv->gsi_base) ++ fwspec->param[0] = fwspec->param[0] - priv->gsi_base; ++ + if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) + return irq_domain_translate_twocell(d, fwspec, hwirq, type); + +@@ -357,6 +369,82 @@ static const struct irq_domain_ops plic_irqdomain_ops = { + .free = irq_domain_free_irqs_top, + }; + ++static bool plic_check_enable_first_pending(u32 ie[]) ++{ ++ struct plic_handler *handler = this_cpu_ptr(&plic_handlers); ++ void __iomem *enable = handler->enable_base; ++ void __iomem *pending = handler->priv->regs + PENDING_BASE; ++ int nr_irqs = handler->priv->nr_irqs; ++ int nr_irq_groups = (nr_irqs + 31) / 32; ++ bool is_pending = false; ++ int i, j; ++ ++ raw_spin_lock(&handler->enable_lock); ++ ++ // Read current interrupt enables ++ for (i = 0; i < nr_irq_groups; i++) ++ ie[i] = readl(enable + i * sizeof(u32)); ++ ++ // Check for pending interrupts and enable only the first one found ++ for (i = 0; i < nr_irq_groups; i++) { ++ u32 pending_irqs = readl(pending + i * sizeof(u32)) & ie[i]; ++ ++ if (pending_irqs) { ++ int nbit = __ffs(pending_irqs); ++ ++ for (j = 0; j < nr_irq_groups; j++) ++ writel((i == j)?(1 << nbit):0, enable + j * sizeof(u32)); ++ is_pending = true; ++ break; ++ } ++ } ++ ++ raw_spin_unlock(&handler->enable_lock); ++ ++ return is_pending; ++} ++ ++static void plic_restore_enable_state(u32 ie[]) ++{ ++ struct plic_handler *handler = this_cpu_ptr(&plic_handlers); ++ void __iomem *enable = handler->enable_base; ++ int nr_irqs = handler->priv->nr_irqs; ++ int nr_irq_groups = (nr_irqs + 31) / 32; ++ int i; ++ ++ raw_spin_lock(&handler->enable_lock); ++ ++ for (i = 0; i < nr_irq_groups; i++) ++ writel(ie[i], enable + i * sizeof(u32)); ++ ++ raw_spin_unlock(&handler->enable_lock); ++} ++ ++static irq_hw_number_t plic_get_hwirq(void) ++{ ++ struct plic_handler *handler = this_cpu_ptr(&plic_handlers); ++ struct plic_priv *priv = handler->priv; ++ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM; ++ irq_hw_number_t hwirq; ++ u32 ie[32] = {0}; ++ ++ /* ++ * Due to the implementation of the claim register in the UltraRISC DP1000 ++ * platform PLIC not conforming to the specification, this is a hardware ++ * bug. Therefore, when an interrupt is pending, we need to disable the other ++ * interrupts before reading the claim register. After processing the interrupt, ++ * we should then restore the enable register. 
++ */ ++ if (test_bit(PLIC_QUIRK_CLAIM_REGISTER, &priv->plic_quirks)) { ++ hwirq = plic_check_enable_first_pending(ie)?readl(claim):0; ++ plic_restore_enable_state(ie); ++ } else { ++ hwirq = readl(claim); ++ } ++ ++ return hwirq; ++} ++ + /* + * Handling an interrupt is a two-step process: first you claim the interrupt + * by reading the claim register, then you complete the interrupt by writing +@@ -367,19 +455,19 @@ static void plic_handle_irq(struct irq_desc *desc) + { + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + struct irq_chip *chip = irq_desc_get_chip(desc); +- void __iomem *claim = handler->hart_base + CONTEXT_CLAIM; + irq_hw_number_t hwirq; + + WARN_ON_ONCE(!handler->present); + + chained_irq_enter(chip, desc); + +- while ((hwirq = readl(claim))) { ++ while ((hwirq = plic_get_hwirq())) { + int err = generic_handle_domain_irq(handler->priv->irqdomain, + hwirq); +- if (unlikely(err)) +- pr_warn_ratelimited("can't find mapping for hwirq %lu\n", +- hwirq); ++ if (unlikely(err)) { ++ pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n", ++ handler->priv->fwnode, hwirq); ++ } + } + + chained_irq_exit(chip, desc); +@@ -407,71 +495,178 @@ static int plic_starting_cpu(unsigned int cpu) + enable_percpu_irq(plic_parent_irq, + irq_get_trigger_type(plic_parent_irq)); + else +- pr_warn("cpu%d: parent irq not available\n", cpu); ++ pr_warn("%pfwP: cpu%d: parent irq not available\n", ++ handler->priv->fwnode, cpu); + plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD); + + return 0; + } + +-static int __init __plic_init(struct device_node *node, +- struct device_node *parent, +- unsigned long plic_quirks) ++static const struct of_device_id plic_match[] = { ++ { .compatible = "sifive,plic-1.0.0" }, ++ { .compatible = "riscv,plic0" }, ++ { .compatible = "andestech,nceplic100", ++ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, ++ { .compatible = "thead,c900-plic", ++ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, ++ { .compatible = "ultrarisc,dp1000-plic", ++ .data = (const void *)BIT(PLIC_QUIRK_CLAIM_REGISTER) }, ++ {} ++}; ++ ++#ifdef CONFIG_ACPI ++ ++static const struct acpi_device_id plic_acpi_match[] = { ++ { "RSCV0001", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, plic_acpi_match); ++ ++#endif ++static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode, ++ u32 *nr_irqs, u32 *nr_contexts, ++ u32 *gsi_base, u32 *id) + { +- int error = 0, nr_contexts, nr_handlers = 0, i; +- u32 nr_irqs; +- struct plic_priv *priv; +- struct plic_handler *handler; +- unsigned int cpu; ++ int rc; + +- priv = kzalloc(sizeof(*priv), GFP_KERNEL); +- if (!priv) +- return -ENOMEM; ++ if (!is_of_node(fwnode)) { ++ rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL); ++ if (rc) { ++ pr_err("%pfwP: failed to find GSI mapping\n", fwnode); ++ return rc; ++ } + +- priv->plic_quirks = plic_quirks; ++ *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id); ++ if (WARN_ON(!*nr_contexts)) { ++ pr_err("%pfwP: no PLIC context available\n", fwnode); ++ return -EINVAL; ++ } + +- priv->regs = of_iomap(node, 0); +- if (WARN_ON(!priv->regs)) { +- error = -EIO; +- goto out_free_priv; ++ return 0; + } + +- error = -EINVAL; +- of_property_read_u32(node, "riscv,ndev", &nr_irqs); +- if (WARN_ON(!nr_irqs)) +- goto out_iounmap; ++ rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs); ++ if (rc) { ++ pr_err("%pfwP: riscv,ndev property not available\n", fwnode); ++ return rc; ++ } + +- priv->nr_irqs = nr_irqs; ++ *nr_contexts = 
of_irq_count(to_of_node(fwnode)); ++ if (WARN_ON(!(*nr_contexts))) { ++ pr_err("%pfwP: no PLIC context available\n", fwnode); ++ return -EINVAL; ++ } + +- priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL); +- if (!priv->prio_save) +- goto out_free_priority_reg; ++ *gsi_base = 0; ++ *id = 0; + +- nr_contexts = of_irq_count(node); +- if (WARN_ON(!nr_contexts)) +- goto out_free_priority_reg; ++ return 0; ++} + +- error = -ENOMEM; +- priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1, +- &plic_irqdomain_ops, priv); +- if (WARN_ON(!priv->irqdomain)) +- goto out_free_priority_reg; ++static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context, ++ u32 *parent_hwirq, int *parent_cpu, u32 id) ++{ ++ struct of_phandle_args parent; ++ unsigned long hartid; ++ int rc; ++ ++ if (!is_of_node(fwnode)) { ++ hartid = acpi_rintc_ext_parent_to_hartid(id, context); ++ if (hartid == INVALID_HARTID) ++ return -EINVAL; ++ ++ *parent_cpu = riscv_hartid_to_cpuid(hartid); ++ *parent_hwirq = RV_IRQ_EXT; ++ return 0; ++ } + +- for (i = 0; i < nr_contexts; i++) { +- struct of_phandle_args parent; +- irq_hw_number_t hwirq; +- int cpu; +- unsigned long hartid; ++ rc = of_irq_parse_one(to_of_node(fwnode), context, &parent); ++ if (rc) ++ return rc; ++ ++ rc = riscv_of_parent_hartid(parent.np, &hartid); ++ if (rc) ++ return rc; ++ ++ *parent_hwirq = parent.args[0]; ++ *parent_cpu = riscv_hartid_to_cpuid(hartid); ++ return 0; ++} ++ ++static int plic_probe(struct fwnode_handle *fwnode) ++{ ++ int error = 0, nr_contexts, nr_handlers = 0, cpu, i; ++ unsigned long plic_quirks = 0; ++ struct plic_handler *handler; ++ u32 nr_irqs, parent_hwirq; ++ struct plic_priv *priv; ++ irq_hw_number_t hwirq; ++ void __iomem *regs; ++ int id, context_id; ++ u32 gsi_base; ++ ++ if (is_of_node(fwnode)) { ++ const struct of_device_id *id; ++ ++ id = of_match_node(plic_match, to_of_node(fwnode)); ++ if (id) ++ plic_quirks = (unsigned long)id->data; ++ ++ regs = of_iomap(to_of_node(fwnode), 0); ++ if (!regs) ++ return -ENOMEM; ++ } else { ++ regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0); ++ if (IS_ERR(regs)) ++ return PTR_ERR(regs); ++ } ++ ++ error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id); ++ if (error) ++ goto fail_free_regs; ++ ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); ++ if (!priv) { ++ error = -ENOMEM; ++ goto fail_free_regs; ++ } ++ ++ priv->fwnode = fwnode; ++ priv->plic_quirks = plic_quirks; ++ priv->nr_irqs = nr_irqs; ++ priv->regs = regs; ++ priv->gsi_base = gsi_base; ++ priv->acpi_plic_id = id; ++ ++ priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL); ++ if (!priv->prio_save) { ++ error = -ENOMEM; ++ goto fail_free_priv; ++ } + +- if (of_irq_parse_one(node, i, &parent)) { +- pr_err("failed to parse parent for context %d.\n", i); ++ for (i = 0; i < nr_contexts; i++) { ++ error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, ++ priv->acpi_plic_id); ++ if (error) { ++ pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i); + continue; + } + ++ if (is_of_node(fwnode)) { ++ context_id = i; ++ } else { ++ context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i); ++ if (context_id == INVALID_CONTEXT) { ++ pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i); ++ continue; ++ } ++ } ++ + /* + * Skip contexts other than external interrupts for our + * privilege level. + */ +- if (parent.args[0] != RV_IRQ_EXT) { ++ if (parent_hwirq != RV_IRQ_EXT) { + /* Disable S-mode enable bits if running in M-mode. 
*/ + if (IS_ENABLED(CONFIG_RISCV_M_MODE)) { + void __iomem *enable_base = priv->regs + +@@ -484,26 +679,11 @@ static int __init __plic_init(struct device_node *node, + continue; + } + +- error = riscv_of_parent_hartid(parent.np, &hartid); +- if (error < 0) { +- pr_warn("failed to parse hart ID for context %d.\n", i); +- continue; +- } +- +- cpu = riscv_hartid_to_cpuid(hartid); + if (cpu < 0) { +- pr_warn("Invalid cpuid for context %d\n", i); ++ pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i); + continue; + } + +- /* Find parent domain and register chained handler */ +- if (!plic_parent_irq && irq_find_host(parent.np)) { +- plic_parent_irq = irq_of_parse_and_map(node, i); +- if (plic_parent_irq) +- irq_set_chained_handler(plic_parent_irq, +- plic_handle_irq); +- } +- + /* + * When running in M-mode we need to ignore the S-mode handler. + * Here we assume it always comes later, but that might be a +@@ -511,7 +691,7 @@ static int __init __plic_init(struct device_node *node, + */ + handler = per_cpu_ptr(&plic_handlers, cpu); + if (handler->present) { +- pr_warn("handler already present for context %d.\n", i); ++ pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i); + plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); + goto done; + } +@@ -519,16 +699,16 @@ static int __init __plic_init(struct device_node *node, + cpumask_set_cpu(cpu, &priv->lmask); + handler->present = true; + handler->hart_base = priv->regs + CONTEXT_BASE + +- i * CONTEXT_SIZE; ++ context_id * CONTEXT_SIZE; + raw_spin_lock_init(&handler->enable_lock); + handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + +- i * CONTEXT_ENABLE_SIZE; ++ context_id * CONTEXT_ENABLE_SIZE; + handler->priv = priv; + +- handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), +- sizeof(*handler->enable_save), GFP_KERNEL); ++ handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), ++ sizeof(*handler->enable_save), GFP_KERNEL); + if (!handler->enable_save) +- goto out_free_enable_reg; ++ goto fail_cleanup_contexts; + done: + for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { + plic_toggle(handler, hwirq, 0); +@@ -538,52 +718,101 @@ static int __init __plic_init(struct device_node *node, + nr_handlers++; + } + ++ priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1, ++ &plic_irqdomain_ops, priv); ++ if (WARN_ON(!priv->irqdomain)) ++ goto fail_cleanup_contexts; ++ + /* +- * We can have multiple PLIC instances so setup cpuhp state +- * and register syscore operations only when context handler +- * for current/boot CPU is present. ++ * We can have multiple PLIC instances so setup global state ++ * and register syscore operations only once after context ++ * handlers of all online CPUs are initialized. 
+ */ +- handler = this_cpu_ptr(&plic_handlers); +- if (handler->present && !plic_cpuhp_setup_done) { +- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, +- "irqchip/sifive/plic:starting", +- plic_starting_cpu, plic_dying_cpu); +- register_syscore_ops(&plic_irq_syscore_ops); +- plic_cpuhp_setup_done = true; ++ if (!plic_global_setup_done) { ++ struct irq_domain *domain; ++ bool global_setup = true; ++ ++ for_each_online_cpu(cpu) { ++ handler = per_cpu_ptr(&plic_handlers, cpu); ++ if (!handler->present) { ++ global_setup = false; ++ break; ++ } ++ } ++ ++ if (global_setup) { ++ /* Find parent domain and register chained handler */ ++ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); ++ if (domain) ++ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); ++ if (plic_parent_irq) ++ irq_set_chained_handler(plic_parent_irq, plic_handle_irq); ++ ++ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, ++ "irqchip/sifive/plic:starting", ++ plic_starting_cpu, plic_dying_cpu); ++ register_syscore_ops(&plic_irq_syscore_ops); ++ plic_global_setup_done = true; ++ } + } + +- pr_info("%pOFP: mapped %d interrupts with %d handlers for" +- " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts); ++#ifdef CONFIG_ACPI ++ if (!acpi_disabled) ++ acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev)); ++#endif ++ ++ pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n", ++ fwnode, nr_irqs, nr_handlers, nr_contexts); + return 0; + +-out_free_enable_reg: +- for_each_cpu(cpu, cpu_present_mask) { ++fail_cleanup_contexts: ++ for (i = 0; i < nr_contexts; i++) { ++ if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id)) ++ continue; ++ if (parent_hwirq != RV_IRQ_EXT || cpu < 0) ++ continue; ++ + handler = per_cpu_ptr(&plic_handlers, cpu); ++ handler->present = false; ++ handler->hart_base = NULL; ++ handler->enable_base = NULL; + kfree(handler->enable_save); ++ handler->enable_save = NULL; ++ handler->priv = NULL; + } +-out_free_priority_reg: +- kfree(priv->prio_save); +-out_iounmap: +- iounmap(priv->regs); +-out_free_priv: ++ bitmap_free(priv->prio_save); ++fail_free_priv: + kfree(priv); ++fail_free_regs: ++ iounmap(regs); + return error; + } + +-static int __init plic_init(struct device_node *node, +- struct device_node *parent) ++static int plic_platform_probe(struct platform_device *pdev) + { +- return __plic_init(node, parent, 0); ++ return plic_probe(pdev->dev.fwnode); + } + +-IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); +-IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ ++static struct platform_driver plic_driver = { ++ .driver = { ++ .name = "riscv-plic", ++ .of_match_table = plic_match, ++ .suppress_bind_attrs = true, ++ .acpi_match_table = ACPI_PTR(plic_acpi_match), ++ }, ++ .probe = plic_platform_probe, ++}; ++ ++static int __init plic_init(void) ++{ ++ return platform_driver_register(&plic_driver); ++} ++arch_initcall(plic_init); + +-static int __init plic_edge_init(struct device_node *node, +- struct device_node *parent) ++static int __init plic_early_probe(struct device_node *node, ++ struct device_node *parent) + { +- return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT)); ++ return plic_probe(&node->fwnode); + } + +-IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init); +-IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init); ++IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe); +diff --git 
a/drivers/irqchip/irq-thead-c900-aclint-sswi.c b/drivers/irqchip/irq-thead-c900-aclint-sswi.c +new file mode 100644 +index 000000000000..1f24faf9f652 +--- /dev/null ++++ b/drivers/irqchip/irq-thead-c900-aclint-sswi.c +@@ -0,0 +1,351 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2024 Inochi Amaoto ++ */ ++ ++#define pr_fmt(fmt) "thead-c900-aclint-sswi: " fmt ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define THEAD_ACLINT_xSWI_REGISTER_SIZE 4 ++ ++#define THEAD_C9XX_CSR_SXSTATUS 0x5c0 ++#define THEAD_C9XX_SXSTATUS_CLINTEE BIT(17) ++ ++static int sswi_ipi_virq __ro_after_init; ++static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs); ++ ++static void thead_aclint_sswi_ipi_send(unsigned int cpu) ++{ ++ writel_relaxed(0x1, per_cpu(sswi_cpu_regs, cpu)); ++} ++ ++static void thead_aclint_sswi_ipi_clear(void) ++{ ++ writel_relaxed(0x0, this_cpu_read(sswi_cpu_regs)); ++} ++ ++static void thead_aclint_sswi_ipi_handle(struct irq_desc *desc) ++{ ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ ++ chained_irq_enter(chip, desc); ++ ++ csr_clear(CSR_IP, IE_SIE); ++ thead_aclint_sswi_ipi_clear(); ++ ++ ipi_mux_process(); ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static int thead_aclint_sswi_starting_cpu(unsigned int cpu) ++{ ++ enable_percpu_irq(sswi_ipi_virq, irq_get_trigger_type(sswi_ipi_virq)); ++ ++ return 0; ++} ++ ++static int thead_aclint_sswi_dying_cpu(unsigned int cpu) ++{ ++ thead_aclint_sswi_ipi_clear(); ++ ++ disable_percpu_irq(sswi_ipi_virq); ++ ++ return 0; ++} ++ ++static int __init thead_aclint_sswi_parse_irq(struct fwnode_handle *fwnode, ++ void __iomem *reg) ++{ ++ struct of_phandle_args parent; ++ unsigned long hartid; ++ u32 contexts, i; ++ int rc, cpu; ++ ++ contexts = of_irq_count(to_of_node(fwnode)); ++ if (!(contexts)) { ++ pr_err("%pfwP: no ACLINT SSWI context available\n", fwnode); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < contexts; i++) { ++ rc = of_irq_parse_one(to_of_node(fwnode), i, &parent); ++ if (rc) ++ return rc; ++ ++ rc = riscv_of_parent_hartid(parent.np, &hartid); ++ if (rc) ++ return rc; ++ ++ if (parent.args[0] != RV_IRQ_SOFT) ++ return -ENOTSUPP; ++ ++ cpu = riscv_hartid_to_cpuid(hartid); ++ ++ per_cpu(sswi_cpu_regs, cpu) = reg + i * THEAD_ACLINT_xSWI_REGISTER_SIZE; ++ } ++ ++ pr_info("%pfwP: register %u CPU%s\n", fwnode, contexts, str_plural(contexts)); ++ ++ return 0; ++} ++ ++static int __init thead_aclint_sswi_probe(struct fwnode_handle *fwnode) ++{ ++ struct irq_domain *domain; ++ void __iomem *reg; ++ int virq, rc; ++ ++ /* If it is T-HEAD CPU, check whether SSWI is enabled */ ++ if (riscv_cached_mvendorid(0) == THEAD_VENDOR_ID && ++ !(csr_read(THEAD_C9XX_CSR_SXSTATUS) & THEAD_C9XX_SXSTATUS_CLINTEE)) ++ return -ENOTSUPP; ++ ++ if (!is_of_node(fwnode)) ++ return -EINVAL; ++ ++ reg = of_iomap(to_of_node(fwnode), 0); ++ if (!reg) ++ return -ENOMEM; ++ ++ /* Parse SSWI setting */ ++ rc = thead_aclint_sswi_parse_irq(fwnode, reg); ++ if (rc < 0) ++ return rc; ++ ++ /* If mulitple SSWI devices are present, do not register irq again */ ++ if (sswi_ipi_virq) ++ return 0; ++ ++ /* Find riscv intc domain and create IPI irq mapping */ ++ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); ++ if (!domain) { ++ pr_err("%pfwP: Failed to find INTC domain\n", fwnode); ++ return -ENOENT; ++ } ++ ++ sswi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT); ++ if (!sswi_ipi_virq) { 
++ pr_err("unable to create ACLINT SSWI IRQ mapping\n"); ++ return -ENOMEM; ++ } ++ ++ /* Register SSWI irq and handler */ ++ virq = ipi_mux_create(BITS_PER_BYTE, thead_aclint_sswi_ipi_send); ++ if (virq <= 0) { ++ pr_err("unable to create muxed IPIs\n"); ++ irq_dispose_mapping(sswi_ipi_virq); ++ return virq < 0 ? virq : -ENOMEM; ++ } ++ ++ irq_set_chained_handler(sswi_ipi_virq, thead_aclint_sswi_ipi_handle); ++ ++ cpuhp_setup_state(CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, ++ "irqchip/thead-aclint-sswi:starting", ++ thead_aclint_sswi_starting_cpu, ++ thead_aclint_sswi_dying_cpu); ++ ++ riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, true); ++ ++ /* Announce that SSWI is providing IPIs */ ++ pr_info("providing IPIs using THEAD ACLINT SSWI\n"); ++ ++ return 0; ++} ++ ++static int __init thead_aclint_sswi_early_probe(struct device_node *node, ++ struct device_node *parent) ++{ ++ return thead_aclint_sswi_probe(&node->fwnode); ++} ++IRQCHIP_DECLARE(thead_aclint_sswi, "thead,c900-aclint-sswi", thead_aclint_sswi_early_probe); ++ ++#ifdef CONFIG_ACPI ++ ++static struct fwnode_handle *sswi_acpi_fwnode; ++ ++static int __init sswi_get_parent_hartid(struct fwnode_handle *fwnode, ++ u32 index, unsigned long *hartid) ++{ ++ struct of_phandle_args parent; ++ int rc; ++ ++ if (!is_of_node(fwnode)) { ++ if (hartid) ++ *hartid = acpi_rintc_index_to_hartid(index); ++ ++ if (!hartid || (*hartid == INVALID_HARTID)) ++ return -EINVAL; ++ ++ return 0; ++ } ++ ++ rc = of_irq_parse_one(to_of_node(fwnode), index, &parent); ++ if (rc) ++ return rc; ++ ++ if (parent.args[0] != RV_IRQ_SOFT) ++ return -ENOTSUPP; ++ ++ return riscv_of_parent_hartid(parent.np, hartid); ++} ++ ++static int __init sswi_get_mmio_resource(struct fwnode_handle *fwnode, ++ u32 index, struct resource *res) ++{ ++ if (!is_of_node(fwnode)) ++ return acpi_rintc_get_imsic_mmio_info(index, res); ++ ++ return of_address_to_resource(to_of_node(fwnode), index, res); ++} ++ ++static int __init sswi_early_probe(struct fwnode_handle *fwnode) ++{ ++ struct irq_domain *domain; ++ int virq; ++ ++ /* Find parent domain and register chained handler */ ++ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); ++ if (!domain) { ++ pr_err("%pfwP: Failed to find INTC domain\n", fwnode); ++ return -ENOENT; ++ } ++ ++ sswi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT); ++ if (!sswi_ipi_virq) { ++ pr_err("%pfwP: Failed to create ACLINT SSWI IRQ mapping\n", fwnode); ++ return -ENOENT; ++ } ++ ++ /* Create IPI multiplexing */ ++ virq = ipi_mux_create(BITS_PER_BYTE, thead_aclint_sswi_ipi_send); ++ if (virq <= 0) ++ return virq < 0 ? 
virq : -ENOMEM; ++ ++ /* Set vIRQ range */ ++ riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, true); ++ ++ /* Announce that SSWI is providing IPIs */ ++ pr_info("%pfwP: providing IPIs\n", fwnode); ++ ++ /* Setup chained handler to the parent domain interrupt */ ++ irq_set_chained_handler(sswi_ipi_virq, thead_aclint_sswi_ipi_handle); ++ ++ cpuhp_setup_state(CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, ++ "irqchip/thead-aclint-sswi:starting", ++ thead_aclint_sswi_starting_cpu, ++ thead_aclint_sswi_dying_cpu); ++ ++ return 0; ++} ++ ++static int __init thead_aclint_sswi_early_acpi_init(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ unsigned long hartid; ++ struct resource *res; ++ int rc, cpu; ++ u32 i, nr_parent_irqs; ++ void __iomem *reg; ++ ++ nr_parent_irqs = 0; ++ ++ /* If it is T-HEAD CPU, check whether SSWI is enabled */ ++ if (riscv_cached_mvendorid(0) == THEAD_VENDOR_ID && ++ !(csr_read(THEAD_C9XX_CSR_SXSTATUS) & THEAD_C9XX_SXSTATUS_CLINTEE)) ++ return -ENOTSUPP; ++ ++ sswi_acpi_fwnode = irq_domain_alloc_named_fwnode("ACLINT-SSWI"); ++ if (!sswi_acpi_fwnode) { ++ pr_err("unable to allocate SSWI FW node\n"); ++ return -ENOMEM; ++ } ++ ++ /* Find number of parent interrupts */ ++ while (!sswi_get_parent_hartid(sswi_acpi_fwnode, nr_parent_irqs, &hartid)) ++ (nr_parent_irqs)++; ++ if (!nr_parent_irqs) { ++ pr_err("%pfwP: no parent irqs available\n", sswi_acpi_fwnode); ++ return -EINVAL; ++ } ++ ++ /* Allocate MMIO resource array */ ++ res = kmalloc(sizeof(struct resource), GFP_KERNEL); ++ if (!res) { ++ pr_err("unable to allocate MMIO resource\n"); ++ return -ENOMEM; ++ } ++ ++ /* Find MMIO register base address */ ++ rc = sswi_get_mmio_resource(sswi_acpi_fwnode, 0, res); ++ if (rc) { ++ pr_warn("%pfwP: hart ID for parent irq0 not found\n", sswi_acpi_fwnode); ++ return -EINVAL; ++ } ++ ++ reg = ioremap(res->start, resource_size(res)); ++ if (!reg) { ++ rc = -EBUSY; ++ pr_err("%pfwP: ioremap failed\n", sswi_acpi_fwnode); ++ goto ioremap_fail; ++ } ++ ++ /* Configure handlers for target CPUs */ ++ for (i = 0; i < nr_parent_irqs; i++) { ++ rc = sswi_get_parent_hartid(sswi_acpi_fwnode, i, &hartid); ++ if (rc) { ++ pr_warn("%pfwP: hart ID for parent irq%d not found\n", sswi_acpi_fwnode, i); ++ continue; ++ } ++ ++ cpu = riscv_hartid_to_cpuid(hartid); ++ if (cpu < 0) { ++ pr_warn("%pfwP: invalid cpuid for parent irq%d\n", sswi_acpi_fwnode, i); ++ continue; ++ } ++ ++ per_cpu(sswi_cpu_regs, cpu) = reg + i * THEAD_ACLINT_xSWI_REGISTER_SIZE; ++ } ++ ++ /* If mulitple SSWI devices are present, do not register irq again */ ++ if (sswi_ipi_virq) ++ return 0; ++ ++ /* Do early setup of IPIs */ ++ rc = sswi_early_probe(sswi_acpi_fwnode); ++ if (rc) { ++ irq_domain_free_fwnode(sswi_acpi_fwnode); ++ sswi_acpi_fwnode = NULL; ++ goto probe_fail; ++ } ++ ++ return 0; ++ ++probe_fail: ++ iounmap(reg); ++ irq_domain_free_fwnode(sswi_acpi_fwnode); ++ sswi_acpi_fwnode = NULL; ++ioremap_fail: ++ kfree(res); ++ return rc; ++} ++ ++IRQCHIP_ACPI_DECLARE(thead_aclint_sswi, ACPI_MADT_TYPE_IMSIC, NULL, ++ 1, thead_aclint_sswi_early_acpi_init); ++#endif diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index bc2e265cb02d..a0186d27086f 100644 --- a/drivers/mailbox/Kconfig @@ -354072,11 +386485,549 @@ index 000000000000..71c983bd631c +MODULE_AUTHOR("Fugang Duan "); +MODULE_DESCRIPTION("XuanTie TH1520 Mailbox IPC driver"); +MODULE_LICENSE("GPL v2"); +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig +index 96633e8d4a9c..7a4a2b83972d 100644 +--- a/drivers/mfd/Kconfig ++++ 
b/drivers/mfd/Kconfig +@@ -2234,6 +2234,18 @@ config MFD_QCOM_PM8008 + under it in the device tree. Additional drivers must be enabled in + order to use the functionality of the device. + ++config MFD_SPACEMIT_P1 ++ tristate "Spacemit P1 Power Management IC" ++ depends on I2C && OF ++ select MFD_CORE ++ select REGMAP_I2C ++ select REGMAP_IRQ ++ help ++ Select this option to get support for the Spacemit P1 Power Management IC. ++ P1 is a dedicated PMIC that integrates voltage supply, gpio, rtc and adc ++ functions in to a single chip. This driver provides common support for ++ accessing the device. ++ + menu "Multimedia Capabilities Port drivers" + depends on ARCH_SA1100 + +diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile +index 95867f732d2e..ef91824881c6 100644 +--- a/drivers/mfd/Makefile ++++ b/drivers/mfd/Makefile +@@ -285,3 +285,5 @@ obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o + + obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o + obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o ++ ++obj-$(CONFIG_MFD_SPACEMIT_P1) += spacemit-p1.o +diff --git a/drivers/mfd/spacemit-p1.c b/drivers/mfd/spacemit-p1.c +new file mode 100644 +index 000000000000..7be10d024c1f +--- /dev/null ++++ b/drivers/mfd/spacemit-p1.c +@@ -0,0 +1,481 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * mfd driver for Spacemit P1 ++ * ++ * Copyright (c) 2023, Spacemit Co., Ltd ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct mfd_match_data { ++ const struct regmap_config *regmap_cfg; ++ const struct regmap_irq_chip *regmap_irq_chip; ++ const struct mfd_cell *mfd_cells; ++ int nr_cells; ++ const char *name; ++ void *ptr; ++ ++ /* shutdown - reboot support */ ++ struct { ++ unsigned char reg; ++ unsigned char bit; ++ } shutdown; ++ ++ struct { ++ unsigned int reg; ++ unsigned char bit; ++ } reboot; ++ ++ /* value will be kept in register while reset pmic */ ++ struct { ++ unsigned int reg; ++ unsigned char bit; ++ } non_reset; ++}; ++ ++static const struct regmap_config spm_p1_regmap_config = { ++ .reg_bits = 8, ++ .val_bits = 8, ++ .max_register = SPM_P1_MAX_REG, ++ .cache_type = REGCACHE_RBTREE, ++}; ++ ++static const struct regmap_irq spm_p1_irqs[] = { ++ [SPM_P1_E_GPI0] = { ++ .mask = SPM_P1_E_GPI0_MSK, ++ .reg_offset = 0, ++ }, ++ ++ [SPM_P1_E_GPI1] = { ++ .mask = SPM_P1_E_GPI1_MSK, ++ .reg_offset = 0, ++ }, ++ ++ [SPM_P1_E_GPI2] = { ++ .mask = SPM_P1_E_GPI2_MSK, ++ .reg_offset = 0, ++ }, ++ ++ [SPM_P1_E_GPI3] = { ++ .mask = SPM_P1_E_GPI3_MSK, ++ .reg_offset = 0, ++ }, ++ ++ [SPM_P1_E_GPI4] = { ++ .mask = SPM_P1_E_GPI4_MSK, ++ .reg_offset = 0, ++ }, ++ ++ [SPM_P1_E_GPI5] = { ++ .mask = SPM_P1_E_GPI5_MSK, ++ .reg_offset = 0, ++ }, ++ ++ [SPM_P1_E_ADC_TEMP] = { ++ .mask = SPM_P1_E_ADC_TEMP_MSK, ++ .reg_offset = 1, ++ }, ++ ++ [SPM_P1_E_ADC_EOC] = { ++ .mask = SPM_P1_E_ADC_EOC_MSK, ++ .reg_offset = 1, ++ }, ++ ++ [SPM_P1_E_ADC_EOS] = { ++ .mask = SPM_P1_E_ADC_EOS_MSK, ++ .reg_offset = 1, ++ }, ++ ++ [SPM_P1_E_WDT_TO] = { ++ .mask = SPM_P1_E_WDT_TO_MSK, ++ .reg_offset = 1, ++ }, ++ ++ [SPM_P1_E_ALARM] = { ++ .mask = SPM_P1_E_ALARM_MSK, ++ .reg_offset = 1, ++ }, ++ ++ [SPM_P1_E_TICK] = { ++ .mask = SPM_P1_E_TICK_MSK, ++ .reg_offset = 1, ++ }, ++ ++ [SPM_P1_E_LDO_OV] = { ++ .mask = SPM_P1_E_LDO_OV_MSK, ++ .reg_offset = 2, ++ }, ++ ++ [SPM_P1_E_LDO_UV] = { ++ .mask = SPM_P1_E_LDO_UV_MSK, ++ .reg_offset = 2, ++ }, ++ ++ [SPM_P1_E_LDO_SC] = { ++ .mask = SPM_P1_E_LDO_SC_MSK, ++ .reg_offset = 2, ++ }, ++ ++ [SPM_P1_E_SW_SC] = { 
++ .mask = SPM_P1_E_SW_SC_MSK, ++ .reg_offset = 2, ++ }, ++ ++ [SPM_P1_E_TEMP_WARN] = { ++ .mask = SPM_P1_E_TEMP_WARN_MSK, ++ .reg_offset = 2, ++ }, ++ ++ [SPM_P1_E_TEMP_SEVERE] = { ++ .mask = SPM_P1_E_TEMP_SEVERE_MSK, ++ .reg_offset = 2, ++ }, ++ ++ [SPM_P1_E_TEMP_CRIT] = { ++ .mask = SPM_P1_E_TEMP_CRIT_MSK, ++ .reg_offset = 2, ++ }, ++ ++ [SPM_P1_E_BUCK1_OV] = { ++ .mask = SPM_P1_E_BUCK1_OV_MSK, ++ .reg_offset = 3, ++ }, ++ ++ [SPM_P1_E_BUCK2_OV] = { ++ .mask = SPM_P1_E_BUCK2_OV_MSK, ++ .reg_offset = 3, ++ }, ++ ++ [SPM_P1_E_BUCK3_OV] = { ++ .mask = SPM_P1_E_BUCK3_OV_MSK, ++ .reg_offset = 3, ++ }, ++ ++ [SPM_P1_E_BUCK4_OV] = { ++ .mask = SPM_P1_E_BUCK4_OV_MSK, ++ .reg_offset = 3, ++ }, ++ ++ [SPM_P1_E_BUCK5_OV] = { ++ .mask = SPM_P1_E_BUCK5_OV_MSK, ++ .reg_offset = 3, ++ }, ++ ++ [SPM_P1_E_BUCK6_OV] = { ++ .mask = SPM_P1_E_BUCK6_OV_MSK, ++ .reg_offset = 3, ++ }, ++ ++ [SPM_P1_E_BUCK1_UV] = { ++ .mask = SPM_P1_E_BUCK1_UV_MSK, ++ .reg_offset = 4, ++ }, ++ ++ [SPM_P1_E_BUCK2_UV] = { ++ .mask = SPM_P1_E_BUCK2_UV_MSK, ++ .reg_offset = 4, ++ }, ++ ++ [SPM_P1_E_BUCK3_UV] = { ++ .mask = SPM_P1_E_BUCK3_UV_MSK, ++ .reg_offset = 4, ++ }, ++ ++ [SPM_P1_E_BUCK4_UV] = { ++ .mask = SPM_P1_E_BUCK4_UV_MSK, ++ .reg_offset = 4, ++ }, ++ ++ [SPM_P1_E_BUCK5_UV] = { ++ .mask = SPM_P1_E_BUCK5_UV_MSK, ++ .reg_offset = 4, ++ }, ++ ++ [SPM_P1_E_BUCK6_UV] = { ++ .mask = SPM_P1_E_BUCK6_UV_MSK, ++ .reg_offset = 4, ++ }, ++ ++ [SPM_P1_E_BUCK1_SC] = { ++ .mask = SPM_P1_E_BUCK1_SC_MSK, ++ .reg_offset = 5, ++ }, ++ ++ [SPM_P1_E_BUCK2_SC] = { ++ .mask = SPM_P1_E_BUCK2_SC_MSK, ++ .reg_offset = 5, ++ }, ++ ++ [SPM_P1_E_BUCK3_SC] = { ++ .mask = SPM_P1_E_BUCK3_SC_MSK, ++ .reg_offset = 5, ++ }, ++ ++ [SPM_P1_E_BUCK4_SC] = { ++ .mask = SPM_P1_E_BUCK4_SC_MSK, ++ .reg_offset = 5, ++ }, ++ ++ [SPM_P1_E_BUCK5_SC] = { ++ .mask = SPM_P1_E_BUCK5_SC_MSK, ++ .reg_offset = 5, ++ }, ++ ++ [SPM_P1_E_BUCK6_SC] = { ++ .mask = SPM_P1_E_BUCK6_SC_MSK, ++ .reg_offset = 5, ++ }, ++ ++ [SPM_P1_E_PWRON_RINTR] = { ++ .mask = SPM_P1_E_PWRON_RINTR_MSK, ++ .reg_offset = 6, ++ }, ++ ++ [SPM_P1_E_PWRON_FINTR] = { ++ .mask = SPM_P1_E_PWRON_FINTR_MSK, ++ .reg_offset = 6, ++ }, ++ ++ [SPM_P1_E_PWRON_SINTR] = { ++ .mask = SPM_P1_E_PWRON_SINTR_MSK, ++ .reg_offset = 6, ++ }, ++ ++ [SPM_P1_E_PWRON_LINTR] = { ++ .mask = SPM_P1_E_PWRON_LINTR_MSK, ++ .reg_offset = 6, ++ }, ++ ++ [SPM_P1_E_PWRON_SDINTR] = { ++ .mask = SPM_P1_E_PWRON_SDINTR_MSK, ++ .reg_offset = 6, ++ }, ++ ++ [SPM_P1_E_VSYS_OV] = { ++ .mask = SPM_P1_E_VSYS_OV_MSK, ++ .reg_offset = 6, ++ }, ++}; ++ ++static const struct regmap_irq_chip spm_p1_irq_chip = { ++ .name = "spm_p1", ++ .irqs = spm_p1_irqs, ++ .num_irqs = ARRAY_SIZE(spm_p1_irqs), ++ .num_regs = 7, ++ .status_base = SPM_P1_E_STATUS_REG_BASE, ++ .mask_base = SPM_P1_E_EN_REG_BASE, ++ .unmask_base = SPM_P1_E_EN_REG_BASE, ++ .ack_base = SPM_P1_E_STATUS_REG_BASE, ++ .init_ack_masked = true, ++ .mask_unmask_non_inverted = true, ++}; ++ ++/* power-key desc */ ++static const struct resource spm_p1_pwrkey_resources[] = { ++ DEFINE_RES_IRQ(SPM_P1_E_PWRON_RINTR), ++ DEFINE_RES_IRQ(SPM_P1_E_PWRON_FINTR), ++ DEFINE_RES_IRQ(SPM_P1_E_PWRON_SINTR), ++ DEFINE_RES_IRQ(SPM_P1_E_PWRON_LINTR), ++}; ++ ++/* rtc desc */ ++static const struct resource spm_p1_rtc_resources[] = { ++ DEFINE_RES_IRQ(SPM_P1_E_ALARM), ++}; ++ ++/* adc desc */ ++static const struct resource spm_p1_adc_resources[] = { ++ DEFINE_RES_IRQ(SPM_P1_E_ADC_EOC), ++}; ++ ++/* mfd configuration */ ++static const struct mfd_cell spm_p1[] = { ++ { ++ .name = "spm-p1-regulator", ++ .of_compatible = 
"spacemit,p1,regulator", ++ }, ++ { ++ .name = "spm-p1-pinctrl", ++ .of_compatible = "spacemit,p1,pinctrl", ++ }, ++ { ++ .name = "spm-p1-pwrkey", ++ .of_compatible = "spacemit,p1,pwrkey", ++ .num_resources = ARRAY_SIZE(spm_p1_pwrkey_resources), ++ .resources = &spm_p1_pwrkey_resources[0], ++ }, ++ { ++ .name = "spm-p1-rtc", ++ .of_compatible = "spacemit,p1,rtc", ++ .num_resources = ARRAY_SIZE(spm_p1_rtc_resources), ++ .resources = &spm_p1_rtc_resources[0], ++ }, ++ { ++ .name = "spm-p1-adc", ++ .of_compatible = "spacemit,p1,adc", ++ .num_resources = ARRAY_SIZE(spm_p1_adc_resources), ++ .resources = &spm_p1_adc_resources[0], ++ }, ++}; ++ ++static struct mfd_match_data spm_p1_mfd_match_data = { ++ .regmap_cfg = &spm_p1_regmap_config, ++ .regmap_irq_chip = &spm_p1_irq_chip, ++ .mfd_cells = spm_p1, ++ .nr_cells = ARRAY_SIZE(spm_p1), ++ .name = "spm_p1", ++ .shutdown = { ++ .reg = SPM_P1_PWR_CTRL2, ++ .bit = SPM_P1_SW_SHUTDOWN_BIT_MSK, ++ }, ++ .reboot = { ++ .reg = SPM_P1_PWR_CTRL2, ++ .bit = SPM_P1_SW_RESET_BIT_MSK, ++ }, ++ .non_reset = { ++ .reg = SPM_P1_NON_RESET_REG, ++ .bit = SPM_P1_RESTART_CFG_BIT_MSK, ++ }, ++}; ++ ++struct mfd_match_data *match_data; ++ ++static void spm_p1_power_off(void) ++{ ++ struct spacemit_pmic *pmic = (struct spacemit_pmic *)match_data->ptr; ++ ++ regmap_update_bits(pmic->regmap, match_data->shutdown.reg, match_data->shutdown.bit, ++ match_data->shutdown.bit); ++ ++ /* Wait for poweroff */ ++ for (;;) ++ cpu_relax(); ++} ++ ++static int spm_p1_restart_notify(struct notifier_block *this, unsigned long mode, void *cmd) ++{ ++ struct spacemit_pmic *pmic = (struct spacemit_pmic *)match_data->ptr; ++ ++ regmap_update_bits(pmic->regmap, match_data->reboot.reg, match_data->reboot.bit, ++ match_data->reboot.bit); ++ ++ return NOTIFY_DONE; ++} ++ ++static struct notifier_block spm_p1_restart_handler = { ++ .notifier_call = spm_p1_restart_notify, ++ .priority = 0, ++}; ++ ++static int spm_p1_probe(struct i2c_client *client) ++{ ++ int ret; ++ int nr_cells; ++ struct device_node *np; ++ struct spacemit_pmic *pmic; ++ const struct mfd_cell *cells; ++ const struct of_device_id *of_id; ++ ++ pmic = devm_kzalloc(&client->dev, sizeof(*pmic), GFP_KERNEL); ++ if (!pmic) ++ return -ENOMEM; ++ ++ of_id = of_match_device(client->dev.driver->of_match_table, &client->dev); ++ if (!of_id) { ++ pr_err("Unable to match OF ID\n"); ++ return -ENODEV; ++ } ++ ++ /* find the property in device node */ ++ np = of_find_compatible_node(NULL, NULL, of_id->compatible); ++ if (!np) ++ return 0; ++ ++ of_node_put(np); ++ ++ match_data = (struct mfd_match_data *)of_id->data; ++ match_data->ptr = (void *)pmic; ++ ++ pmic->regmap_cfg = match_data->regmap_cfg; ++ pmic->regmap_irq_chip = match_data->regmap_irq_chip; ++ cells = match_data->mfd_cells; ++ nr_cells = match_data->nr_cells; ++ ++ pmic->i2c = client; ++ ++ i2c_set_clientdata(client, pmic); ++ ++ pmic->regmap = devm_regmap_init_i2c(client, pmic->regmap_cfg); ++ if (IS_ERR(pmic->regmap)) { ++ pr_err("%s:%d, regmap initialization failed\n", __func__, __LINE__); ++ return PTR_ERR(pmic->regmap); ++ } ++ ++ regcache_cache_bypass(pmic->regmap, true); ++ ++ if (!client->irq) ++ pr_warn("%s:%d, No interrupt supported\n", __func__, __LINE__); ++ else { ++ if (pmic->regmap_irq_chip) { ++ ret = regmap_add_irq_chip(pmic->regmap, client->irq, IRQF_ONESHOT, -1, ++ pmic->regmap_irq_chip, &pmic->irq_data); ++ if (ret) { ++ pr_err("failed to add irqchip %d\n", ret); ++ return ret; ++ } ++ } ++ ++ dev_pm_set_wake_irq(&client->dev, client->irq); ++ 
device_init_wakeup(&client->dev, true); ++ } ++ ++ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE, cells, nr_cells, NULL, 0, ++ regmap_irq_get_domain(pmic->irq_data)); ++ if (ret) { ++ pr_err("failed to add MFD devices %d\n", ret); ++ return -EINVAL; ++ } ++ ++ if (match_data->shutdown.reg) ++ pm_power_off = spm_p1_power_off; ++ ++ if (match_data->reboot.reg) { ++ ret = register_restart_handler(&spm_p1_restart_handler); ++ if (ret) ++ pr_warn("failed to register rst handler, %d\n", ret); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id spm_p1_of_match[] = { ++ { .compatible = "spacemit,p1", .data = (void *)&spm_p1_mfd_match_data }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, spm_p1_of_match); ++ ++static struct i2c_driver spm_p1_i2c_driver = { ++ .driver = { ++ .name = "spm-p1", ++ .of_match_table = spm_p1_of_match, ++ }, ++ .probe = spm_p1_probe, ++}; ++ ++static int spacemit_mfd_init(void) ++{ ++ return i2c_add_driver(&spm_p1_i2c_driver); ++} ++subsys_initcall(spacemit_mfd_init); ++ ++MODULE_DESCRIPTION("Spacemit P1 mfd driver"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig -index bc7e2ad37002..5c705adef829 100644 +index bc7e2ad37002..aede8d196d6b 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig -@@ -654,6 +654,20 @@ config MMC_SDHCI_SPRD +@@ -239,6 +239,17 @@ config MMC_SDHCI_OF_DWCMSHC + If you have a controller with this interface, say Y or M here. + If unsure, say N. + ++config MMC_SDHCI_OF_K1 ++ tristate "SDHCI OF support for the Spacemit K1 SDHCI controllers" ++ depends on MMC_SDHCI_PLTFM ++ depends on OF ++ depends on COMMON_CLK ++ help ++ This selects the Secure Digital Host Controller Interface (SDHCI) ++ found in the Spacemit SoC K1. ++ If you have a controller with this interface, say Y or M here. ++ If unsure, say N. ++ + config MMC_SDHCI_OF_SPARX5 + tristate "SDHCI OF support for the MCHP Sparx5 SoC" + depends on MMC_SDHCI_PLTFM +@@ -654,6 +665,20 @@ config MMC_SDHCI_SPRD If unsure, say N. 
@@ -354098,10 +387049,18 @@ index bc7e2ad37002..5c705adef829 100644 tristate diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile -index a693fa3d3f1c..36976786afe2 100644 +index a693fa3d3f1c..69ddc0fa3a37 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile -@@ -95,6 +95,7 @@ obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o +@@ -86,6 +86,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o + obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o + obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o + obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o ++obj-$(CONFIG_MMC_SDHCI_OF_K1) += sdhci-of-k1.o + obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o + obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o + obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o +@@ -95,6 +96,7 @@ obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o @@ -354853,6 +387812,1487 @@ index a0524127ca07..84ed8dc8681b 100644 #ifdef CONFIG_ACPI if (pltfm_data == &sdhci_dwcmshc_bf3_pdata) sdhci_enable_v4_mode(host); +diff --git a/drivers/mmc/host/sdhci-of-k1.c b/drivers/mmc/host/sdhci-of-k1.c +new file mode 100644 +index 000000000000..3dac2b164d30 +--- /dev/null ++++ b/drivers/mmc/host/sdhci-of-k1.c +@@ -0,0 +1,1475 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Driver for Spacemit Mobile Storage Host Controller ++ * ++ * Copyright (C) 2023 Spacemit ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "sdhci.h" ++#include "sdhci-pltfm.h" ++ ++/* SDH registers define */ ++#define SDHC_OP_EXT_REG 0x108 ++#define OVRRD_CLK_OEN 0x0800 ++#define FORCE_CLK_ON 0x1000 ++ ++#define SDHC_LEGACY_CTRL_REG 0x10C ++#define GEN_PAD_CLK_ON 0x0040 ++ ++#define SDHC_MMC_CTRL_REG 0x114 ++#define MISC_INT_EN 0x0002 ++#define MISC_INT 0x0004 ++#define ENHANCE_STROBE_EN 0x0100 ++#define MMC_HS400 0x0200 ++#define MMC_HS200 0x0400 ++#define MMC_CARD_MODE 0x1000 ++ ++#define SDHC_TX_CFG_REG 0x11C ++#define TX_INT_CLK_SEL 0x40000000 ++#define TX_MUX_SEL 0x80000000 ++ ++#define SDHC_PHY_CTRL_REG 0x160 ++#define PHY_FUNC_EN 0x0001 ++#define PHY_PLL_LOCK 0x0002 ++#define HOST_LEGACY_MODE 0x80000000 ++ ++#define SDHC_PHY_FUNC_REG 0x164 ++#define PHY_TEST_EN 0x0080 ++#define HS200_USE_RFIFO 0x8000 ++ ++#define SDHC_PHY_DLLCFG 0x168 ++#define DLL_PREDLY_NUM 0x04 ++#define DLL_FULLDLY_RANGE 0x10 ++#define DLL_VREG_CTRL 0x40 ++#define DLL_ENABLE 0x80000000 ++#define DLL_REFRESH_SWEN_SHIFT 0x1C ++#define DLL_REFRESH_SW_SHIFT 0x1D ++ ++#define SDHC_PHY_DLLCFG1 0x16C ++#define DLL_REG2_CTRL 0x0C ++#define DLL_REG3_CTRL_MASK 0xFF ++#define DLL_REG3_CTRL_SHIFT 0x10 ++#define DLL_REG2_CTRL_MASK 0xFF ++#define DLL_REG2_CTRL_SHIFT 0x08 ++#define DLL_REG1_CTRL 0x92 ++#define DLL_REG1_CTRL_MASK 0xFF ++#define DLL_REG1_CTRL_SHIFT 0x00 ++ ++#define SDHC_PHY_DLLSTS 0x170 ++#define DLL_LOCK_STATE 0x01 ++ ++#define SDHC_PHY_DLLSTS1 0x174 ++#define DLL_MASTER_DELAY_MASK 0xFF ++#define DLL_MASTER_DELAY_SHIFT 0x10 ++ ++#define SDHC_PHY_PADCFG_REG 0x178 ++#define RX_BIAS_CTRL_SHIFT 0x5 ++#define PHY_DRIVE_SEL_SHIFT 0x0 ++#define PHY_DRIVE_SEL_MASK 0x7 ++#define PHY_DRIVE_SEL_DEFAULT 0x4 ++ ++#define RPM_DELAY 50 
++#define MAX_74CLK_WAIT_COUNT 100 ++ ++#define MMC1_IO_V18EN 0x04 ++#define AKEY_ASFAR 0xBABA ++#define AKEY_ASSAR 0xEB10 ++ ++#define SDHC_RX_CFG_REG 0x118 ++#define RX_SDCLK_SEL0_MASK 0x03 ++#define RX_SDCLK_SEL0_SHIFT 0x00 ++#define RX_SDCLK_SEL0 0x02 ++#define RX_SDCLK_SEL1_MASK 0x03 ++#define RX_SDCLK_SEL1_SHIFT 0x02 ++#define RX_SDCLK_SEL1 0x01 ++ ++#define SDHC_DLINE_CTRL_REG 0x130 ++#define DLINE_PU 0x01 ++#define RX_DLINE_CODE_MASK 0xFF ++#define RX_DLINE_CODE_SHIFT 0x10 ++#define TX_DLINE_CODE_MASK 0xFF ++#define TX_DLINE_CODE_SHIFT 0x18 ++ ++#define SDHC_DLINE_CFG_REG 0x134 ++#define RX_DLINE_REG_MASK 0xFF ++#define RX_DLINE_REG_SHIFT 0x00 ++#define RX_DLINE_GAIN_MASK 0x1 ++#define RX_DLINE_GAIN_SHIFT 0x8 ++#define RX_DLINE_GAIN 0x1 ++#define TX_DLINE_REG_MASK 0xFF ++#define TX_DLINE_REG_SHIFT 0x10 ++ ++#define SDHC_RX_TUNE_DELAY_MIN 0x0 ++#define SDHC_RX_TUNE_DELAY_MAX 0xFF ++#define SDHC_RX_TUNE_DELAY_STEP 0x1 ++ ++static struct sdhci_host *sdio_host; ++ ++struct sdhci_spacemit { ++ struct clk *clk_core; ++ struct clk *clk_io; ++ struct clk *clk_aib; ++ struct reset_control *reset; ++ unsigned char power_mode; ++ struct pinctrl_state *pin; ++ struct pinctrl *pinctrl; ++}; ++ ++static const u32 tuning_patten4[16] = { ++ 0x00ff0fff, 0xccc3ccff, 0xffcc3cc3, 0xeffefffe, ++ 0xddffdfff, 0xfbfffbff, 0xff7fffbf, 0xefbdf777, ++ 0xf0fff0ff, 0x3cccfc0f, 0xcfcc33cc, 0xeeffefff, ++ 0xfdfffdff, 0xffbfffdf, 0xfff7ffbb, 0xde7b7ff7, ++}; ++ ++static const u32 tuning_patten8[32] = { ++ 0xff00ffff, 0x0000ffff, 0xccccffff, 0xcccc33cc, ++ 0xcc3333cc, 0xffffcccc, 0xffffeeff, 0xffeeeeff, ++ 0xffddffff, 0xddddffff, 0xbbffffff, 0xbbffffff, ++ 0xffffffbb, 0xffffff77, 0x77ff7777, 0xffeeddbb, ++ 0x00ffffff, 0x00ffffff, 0xccffff00, 0xcc33cccc, ++ 0x3333cccc, 0xffcccccc, 0xffeeffff, 0xeeeeffff, ++ 0xddffffff, 0xddffffff, 0xffffffdd, 0xffffffbb, ++ 0xffffbbbb, 0xffff77ff, 0xff7777ff, 0xeeddbb77, ++}; ++ ++static u32 spacemit_handle_interrupt(struct sdhci_host *host, u32 intmask) ++{ ++ /* handle sdio SDHCI_INT_CARD_INT */ ++ if ((intmask & SDHCI_INT_CARD_INT) && (host->ier & SDHCI_INT_CARD_INT)) { ++ if (!(host->flags & SDHCI_DEVICE_DEAD)) { ++ host->ier &= ~SDHCI_INT_CARD_INT; ++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); ++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ } ++ ++ /* wakeup ksdioirqd thread */ ++ host->mmc->sdio_irq_pending = true; ++ if (host->mmc->sdio_irq_thread) ++ wake_up_process(host->mmc->sdio_irq_thread); ++ } ++ ++ /* handle error interrupts */ ++ if (intmask & SDHCI_INT_ERROR) { ++ if (intmask & (SDHCI_INT_CRC | SDHCI_INT_DATA_CRC | ++ SDHCI_INT_DATA_END_BIT | SDHCI_INT_AUTO_CMD_ERR)) ++ /* handle crc error for sd device */ ++ if (host->mmc->caps2 & MMC_CAP2_NO_MMC) ++ host->mmc->caps2 |= SDHCI_QUIRK2_BROKEN_SDR104; ++ } ++ ++ return intmask; ++} ++ ++static void spacemit_sdhci_reset(struct sdhci_host *host, u8 mask) ++{ ++ struct platform_device *pdev; ++ struct k1_sdhci_platdata *pdata; ++ unsigned int reg; ++ ++ pdev = to_platform_device(mmc_dev(host->mmc)); ++ pdata = pdev->dev.platform_data; ++ sdhci_reset(host, mask); ++ ++ if (mask != SDHCI_RESET_ALL) ++ return; ++ ++ /* sd/sdio only be SDHCI_QUIRK2_BROKEN_PHY_MODULE */ ++ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_PHY_MODULE)) { ++ if (host->quirks2 & SDHCI_QUIRK2_SUPPORT_PHY_BYPASS) { ++ /* use phy bypass */ ++ reg = sdhci_readl(host, SDHC_TX_CFG_REG); ++ reg |= TX_INT_CLK_SEL; ++ sdhci_writel(host, reg, SDHC_TX_CFG_REG); ++ ++ reg = sdhci_readl(host, SDHC_PHY_CTRL_REG); ++ reg |= HOST_LEGACY_MODE; ++ 
sdhci_writel(host, reg, SDHC_PHY_CTRL_REG); ++ ++ reg = sdhci_readl(host, SDHC_PHY_FUNC_REG); ++ reg |= PHY_TEST_EN; ++ sdhci_writel(host, reg, SDHC_PHY_FUNC_REG); ++ } else { ++ /* use phy func mode */ ++ reg = sdhci_readl(host, SDHC_PHY_CTRL_REG); ++ reg |= (PHY_FUNC_EN | PHY_PLL_LOCK); ++ sdhci_writel(host, reg, SDHC_PHY_CTRL_REG); ++ ++ reg = sdhci_readl(host, SDHC_PHY_PADCFG_REG); ++ reg |= (1 << RX_BIAS_CTRL_SHIFT); ++ ++ reg &= ~(PHY_DRIVE_SEL_MASK); ++ reg |= (pdata->phy_driver_sel & PHY_DRIVE_SEL_MASK) << PHY_DRIVE_SEL_SHIFT; ++ sdhci_writel(host, reg, SDHC_PHY_PADCFG_REG); ++ } ++ } else { ++ reg = sdhci_readl(host, SDHC_TX_CFG_REG); ++ reg |= TX_INT_CLK_SEL; ++ sdhci_writel(host, reg, SDHC_TX_CFG_REG); ++ } ++ ++ /* for emmc */ ++ if (!(host->mmc->caps2 & MMC_CAP2_NO_MMC)) { ++ /* mmc card mode */ ++ reg = sdhci_readl(host, SDHC_MMC_CTRL_REG); ++ reg |= MMC_CARD_MODE; ++ sdhci_writel(host, reg, SDHC_MMC_CTRL_REG); ++ } ++} ++ ++static void spacemit_sdhci_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_spacemit *spacemit = sdhci_pltfm_priv(pltfm_host); ++ unsigned int reg; ++ int count; ++ ++ if (!(host->mmc->caps2 & MMC_CAP2_NO_SDIO)) ++ return; ++ ++ if ((spacemit->power_mode == MMC_POWER_UP) && ++ (power_mode == MMC_POWER_ON)) { ++ reg = sdhci_readl(host, SDHC_MMC_CTRL_REG); ++ reg |= MISC_INT_EN; ++ sdhci_writel(host, reg, SDHC_MMC_CTRL_REG); ++ ++ reg = sdhci_readl(host, SDHC_LEGACY_CTRL_REG); ++ reg |= GEN_PAD_CLK_ON; ++ sdhci_writel(host, reg, SDHC_LEGACY_CTRL_REG); ++ ++ count = 0; ++ while (count++ < MAX_74CLK_WAIT_COUNT) { ++ if (sdhci_readl(host, SDHC_MMC_CTRL_REG) & MISC_INT) ++ break; ++ udelay(10); ++ } ++ ++ if (count == MAX_74CLK_WAIT_COUNT) ++ pr_warn("%s: gen 74 clock interrupt timeout\n", mmc_hostname(host->mmc)); ++ ++ reg = sdhci_readl(host, SDHC_MMC_CTRL_REG); ++ reg |= MISC_INT; ++ sdhci_writel(host, reg, SDHC_MMC_CTRL_REG); ++ } ++ spacemit->power_mode = power_mode; ++} ++ ++static void spacemit_sdhci_caps_disable(struct sdhci_host *host) ++{ ++ struct platform_device *pdev; ++ struct k1_sdhci_platdata *pdata; ++ ++ pdev = to_platform_device(mmc_dev(host->mmc)); ++ pdata = pdev->dev.platform_data; ++ ++ if (pdata->host_caps_disable) ++ host->mmc->caps &= ~(pdata->host_caps_disable); ++ if (pdata->host_caps2_disable) ++ host->mmc->caps2 &= ~(pdata->host_caps2_disable); ++} ++ ++static void spacemit_sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) ++{ ++ u16 reg; ++ ++ if ((timing == MMC_TIMING_MMC_HS200) || ++ (timing == MMC_TIMING_MMC_HS400)) { ++ reg = sdhci_readw(host, SDHC_MMC_CTRL_REG); ++ reg |= (timing == MMC_TIMING_MMC_HS200) ? 
MMC_HS200 : MMC_HS400; ++ sdhci_writew(host, reg, SDHC_MMC_CTRL_REG); ++ } ++ sdhci_set_uhs_signaling(host, timing); ++ if (!(host->mmc->caps2 & MMC_CAP2_NO_SDIO)) { ++ reg = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ sdhci_writew(host, reg | SDHCI_CTRL_VDD_180, SDHCI_HOST_CONTROL2); ++ } ++} ++ ++static void spacemit_sdhci_set_clk_gate(struct sdhci_host *host, unsigned int auto_gate) ++{ ++ unsigned int reg; ++ ++ reg = sdhci_readl(host, SDHC_OP_EXT_REG); ++ if (auto_gate) ++ reg &= ~(OVRRD_CLK_OEN | FORCE_CLK_ON); ++ else ++ reg |= (OVRRD_CLK_OEN | FORCE_CLK_ON); ++ sdhci_writel(host, reg, SDHC_OP_EXT_REG); ++} ++ ++static int spacemit_sdhci_card_busy(struct mmc_host *mmc) ++{ ++ struct sdhci_host *host = mmc_priv(mmc); ++ u32 present_state; ++ u32 ret; ++ u32 cmd; ++ ++ /* Check whether DAT[0] is 0 */ ++ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); ++ ret = !(present_state & SDHCI_DATA_0_LVL_MASK); ++ ++ if (host->mmc->caps2 & MMC_CAP2_NO_MMC) { ++ cmd = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); ++ if ((cmd == SD_SWITCH_VOLTAGE) && ++ (host->mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { ++ /* recover the auto clock */ ++ spacemit_sdhci_set_clk_gate(host, 1); ++ } ++ } ++ ++ return ret; ++} ++ ++static void spacemit_init_card_quriks(struct mmc_host *mmc, struct mmc_card *card) ++{ ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ struct sdhci_host *host = mmc_priv(mmc); ++ struct rx_tuning *rxtuning = &pdata->rxtuning; ++ ++ if (mmc->caps2 & MMC_CAP2_NO_MMC) { ++ /* break sdr104 */ ++ if (mmc->caps2 & SDHCI_QUIRK2_BROKEN_SDR104) { ++ mmc->caps &= ~MMC_CAP_UHS_SDR104; ++ mmc->caps2 &= ~SDHCI_QUIRK2_BROKEN_SDR104; ++ } else { ++ if (rxtuning->tuning_fail) { ++ /* fallback bus speed */ ++ mmc->caps &= ~MMC_CAP_UHS_SDR104; ++ rxtuning->tuning_fail = 0; ++ } else if (!(pdata->host_caps_disable & MMC_CAP_UHS_SDR104) && ++ !(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)) ++ /* recovery sdr104 capability */ ++ mmc->caps |= MMC_CAP_UHS_SDR104; ++ } ++ } ++ ++ if (!(mmc->caps2 & MMC_CAP2_NO_SDIO)) { ++ /* disable MMC_CAP2_SDIO_IRQ_NOTHREAD */ ++ mmc->caps2 &= ~MMC_CAP2_SDIO_IRQ_NOTHREAD; ++ ++ /* use the fake irq pending to avoid to read the SDIO_CCCR_INTx ++ * which sometimes return an abnormal value. 
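++ *
++ * With sdio_irq_pending forced to true, the MMC core's single-function
++ * SDIO IRQ path can call the registered handler directly instead of
++ * polling SDIO_CCCR_INTx over the bus (assumed from the core's
++ * sdio_single_irq optimization).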
++ */ ++ mmc->sdio_irq_pending = true; ++ } ++} ++ ++static void spacemit_sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) ++{ ++ if (!(host->flags & SDHCI_DEVICE_DEAD)) { ++ if (enable) ++ host->ier |= SDHCI_INT_CARD_INT; ++ else ++ host->ier &= ~SDHCI_INT_CARD_INT; ++ ++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); ++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ } ++} ++ ++static void spacemit_sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) ++{ ++ struct sdhci_host *host = mmc_priv(mmc); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ spacemit_sdhci_enable_sdio_irq_nolock(host, enable); ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static void spacemit_enable_sdio_irq(struct mmc_host *mmc, int enable) ++{ ++ struct sdhci_host *host = mmc_priv(mmc); ++ unsigned long flags; ++ ++ spacemit_sdhci_enable_sdio_irq(mmc, enable); ++ ++ /* avoid to read the SDIO_CCCR_INTx */ ++ spin_lock_irqsave(&host->lock, flags); ++ mmc->sdio_irq_pending = true; ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static void spacemit_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ ++ if (!(mmc->caps2 & MMC_CAP2_NO_SDIO)) { ++ while (atomic_inc_return(&pdata->ref_count) > 1) { ++ atomic_dec(&pdata->ref_count); ++ wait_event(pdata->wait_queue, atomic_read(&pdata->ref_count) == 0); ++ } ++ } ++ ++ sdhci_request(mmc, mrq); ++} ++ ++static void spacemit_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_spacemit *spacemit = sdhci_pltfm_priv(pltfm_host); ++ struct mmc_host *mmc = host->mmc; ++ unsigned int reg; ++ u32 cmd; ++ ++ /* according to the SDHC_TX_CFG_REG(0x11c), ++ * set TX_INT_CLK_SEL to gurantee the hold time ++ * at default speed mode or HS/SDR12/SDR25/SDR50 mode. ++ */ ++ reg = sdhci_readl(host, SDHC_TX_CFG_REG); ++ if ((mmc->ios.timing == MMC_TIMING_LEGACY) || ++ (mmc->ios.timing == MMC_TIMING_SD_HS) || ++ (mmc->ios.timing == MMC_TIMING_UHS_SDR12) || ++ (mmc->ios.timing == MMC_TIMING_UHS_SDR25) || ++ (mmc->ios.timing == MMC_TIMING_UHS_SDR50) || ++ (mmc->ios.timing == MMC_TIMING_MMC_HS)) ++ reg |= TX_INT_CLK_SEL; ++ else ++ reg &= ~TX_INT_CLK_SEL; ++ sdhci_writel(host, reg, SDHC_TX_CFG_REG); ++ ++ /* set pinctrl state */ ++ if (spacemit->pinctrl && !IS_ERR(spacemit->pinctrl)) { ++ if (clock >= 200000000) { ++ spacemit->pin = pinctrl_lookup_state(spacemit->pinctrl, "fast"); ++ if (IS_ERR(spacemit->pin)) ++ pr_warn("could not get sdhci fast pinctrl state.\n"); ++ else ++ pinctrl_select_state(spacemit->pinctrl, spacemit->pin); ++ } else { ++ spacemit->pin = pinctrl_lookup_state(spacemit->pinctrl, "default"); ++ if (IS_ERR(spacemit->pin)) ++ pr_warn("could not get sdhci default pinctrl state.\n"); ++ else ++ pinctrl_select_state(spacemit->pinctrl, spacemit->pin); ++ } ++ } ++ ++ sdhci_set_clock(host, clock); ++ ++ if (host->mmc->caps2 & MMC_CAP2_NO_MMC) { ++ /* ++ * according to the SD spec, during a signal voltage level switch, ++ * the clock must be closed for 5 ms. ++ * then, the host starts providing clk at 1.8 and the host checks whether ++ * DAT[3:0] is high after 1ms clk. ++ * ++ * for the above goal, temporarily disable the auto clk and keep clk always ++ * on for 1ms. 
++ */ ++ cmd = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); ++ if ((cmd == SD_SWITCH_VOLTAGE) && ++ (host->mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { ++ /* disable auto clock */ ++ if (clock) ++ /* ++ * some sdio device's signal level is already 1.8V before ++ * voltage switch, so we should avoid generating clock multiple ++ * times during switch sequence. ++ */ ++ spacemit_sdhci_set_clk_gate(host, 0); ++ } ++ } ++}; ++ ++static void spacemit_sdhci_phy_dll_init(struct sdhci_host *host) ++{ ++ u32 reg; ++ int i; ++ ++ /* config dll_reg1 & dll_reg2 */ ++ reg = sdhci_readl(host, SDHC_PHY_DLLCFG); ++ reg |= (DLL_PREDLY_NUM | DLL_FULLDLY_RANGE | DLL_VREG_CTRL); ++ sdhci_writel(host, reg, SDHC_PHY_DLLCFG); ++ ++ reg = sdhci_readl(host, SDHC_PHY_DLLCFG1); ++ reg |= (DLL_REG1_CTRL & DLL_REG1_CTRL_MASK); ++ sdhci_writel(host, reg, SDHC_PHY_DLLCFG1); ++ ++ /* dll enable */ ++ reg = sdhci_readl(host, SDHC_PHY_DLLCFG); ++ reg |= DLL_ENABLE; ++ sdhci_writel(host, reg, SDHC_PHY_DLLCFG); ++ ++ /* wait dll lock */ ++ i = 0; ++ while (i++ < 100) { ++ if (sdhci_readl(host, SDHC_PHY_DLLSTS) & DLL_LOCK_STATE) ++ break; ++ udelay(10); ++ } ++ if (i == 100) ++ pr_err("%s: dll lock timeout\n", mmc_hostname(host->mmc)); ++} ++ ++static void spacemit_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ u32 reg; ++ struct sdhci_host *host = mmc_priv(mmc); ++ ++ reg = sdhci_readl(host, SDHC_MMC_CTRL_REG); ++ if (ios->enhanced_strobe) ++ reg |= ENHANCE_STROBE_EN; ++ else ++ reg &= ~ENHANCE_STROBE_EN; ++ sdhci_writel(host, reg, SDHC_MMC_CTRL_REG); ++ ++ if (ios->enhanced_strobe) ++ spacemit_sdhci_phy_dll_init(host); ++} ++ ++static int spacemit_sdhci_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ struct sdhci_host *host = mmc_priv(mmc); ++ u16 ctrl; ++ int ret; ++ ++ /* ++ * Signal Voltage Switching is only applicable for Host Controllers ++ * v3.00 and above. 
++ */ ++ if (host->version < SDHCI_SPEC_300) ++ return 0; ++ ++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ ++ switch (ios->signal_voltage) { ++ case MMC_SIGNAL_VOLTAGE_330: ++ if (!(host->flags & SDHCI_SIGNALING_330)) ++ return -EINVAL; ++ /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ ++ ctrl &= ~SDHCI_CTRL_VDD_180; ++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); ++ ++ /* Some controller need to do more when switching */ ++ if (host->ops->voltage_switch) ++ host->ops->voltage_switch(host); ++ ++ if (!IS_ERR(mmc->supply.vqmmc)) { ++ ret = mmc_regulator_set_vqmmc(mmc, ios); ++ if (ret < 0) { ++ pr_warn("%s: Switching to 3.3V signalling voltage failed\n", ++ mmc_hostname(mmc)); ++ return -EIO; ++ } ++ } ++ /* Wait for 5ms */ ++ usleep_range(5000, 5500); ++ ++ /* 3.3V regulator output should be stable within 5 ms */ ++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ if (!(ctrl & SDHCI_CTRL_VDD_180)) ++ return 0; ++ ++ pr_warn("%s: 3.3V regulator output did not become stable\n", mmc_hostname(mmc)); ++ return -EAGAIN; ++ ++ case MMC_SIGNAL_VOLTAGE_180: ++ if (!(host->flags & SDHCI_SIGNALING_180)) ++ return -EINVAL; ++ if (!IS_ERR(mmc->supply.vqmmc)) { ++ ret = mmc_regulator_set_vqmmc(mmc, ios); ++ if (ret < 0) { ++ pr_warn("%s: Switching to 1.8V signalling voltage failed\n", ++ mmc_hostname(mmc)); ++ return -EIO; ++ } ++ } ++ ++ /* ++ * Enable 1.8V Signal Enable in the Host Control2 ++ * register ++ */ ++ ctrl |= SDHCI_CTRL_VDD_180; ++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); ++ ++ /* Some controller need to do more when switching */ ++ if (host->ops->voltage_switch) ++ host->ops->voltage_switch(host); ++ ++ /* 1.8V regulator output should be stable within 5 ms */ ++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ if (ctrl & SDHCI_CTRL_VDD_180) ++ return 0; ++ ++ pr_warn("%s: 1.8V regulator output did not become stable\n", ++ mmc_hostname(mmc)); ++ ++ return -EAGAIN; ++ ++ case MMC_SIGNAL_VOLTAGE_120: ++ if (!(host->flags & SDHCI_SIGNALING_120)) ++ return -EINVAL; ++ if (!IS_ERR(mmc->supply.vqmmc)) { ++ ret = mmc_regulator_set_vqmmc(mmc, ios); ++ if (ret < 0) { ++ pr_warn("%s: Switching to 1.2V signalling voltage failed\n", ++ mmc_hostname(mmc)); ++ return -EIO; ++ } ++ } ++ return 0; ++ ++ default: ++ /* No signal voltage switch required */ ++ return 0; ++ } ++} ++ ++static void spacemit_set_aib_mmc1_io(struct sdhci_host *host, int vol) ++{ ++ void __iomem *aib_mmc1_io; ++ void __iomem *apbc_asfar; ++ void __iomem *apbc_assar; ++ u32 reg; ++ struct platform_device *pdev; ++ struct k1_sdhci_platdata *pdata; ++ ++ pdev = to_platform_device(mmc_dev(host->mmc)); ++ pdata = pdev->dev.platform_data; ++ ++ if (!pdata->aib_mmc1_io_reg || ++ !pdata->apbc_asfar_reg || ++ !pdata->apbc_assar_reg) ++ return; ++ ++ aib_mmc1_io = ioremap(pdata->aib_mmc1_io_reg, 4); ++ apbc_asfar = ioremap(pdata->apbc_asfar_reg, 4); ++ apbc_assar = ioremap(pdata->apbc_assar_reg, 4); ++ ++ writel(AKEY_ASFAR, apbc_asfar); ++ writel(AKEY_ASSAR, apbc_assar); ++ reg = readl(aib_mmc1_io); ++ ++ switch (vol) { ++ case MMC_SIGNAL_VOLTAGE_180: ++ reg |= MMC1_IO_V18EN; ++ break; ++ default: ++ reg &= ~MMC1_IO_V18EN; ++ break; ++ } ++ writel(AKEY_ASFAR, apbc_asfar); ++ writel(AKEY_ASSAR, apbc_assar); ++ writel(reg, aib_mmc1_io); ++ ++ iounmap(apbc_assar); ++ iounmap(apbc_asfar); ++ iounmap(aib_mmc1_io); ++} ++ ++static void spacemit_sdhci_voltage_switch(struct sdhci_host *host) ++{ ++ struct mmc_host *mmc = host->mmc; ++ struct mmc_ios ios = mmc->ios; ++ ++ /* ++ * v18en(MS) bit should meet TSMC's 
requirement when switch SOC SD ++ * IO voltage from 3.3(3.0)v to 1.8v ++ */ ++ if (host->quirks2 & SDHCI_QUIRK2_SET_AIB_MMC) ++ spacemit_set_aib_mmc1_io(host, ios.signal_voltage); ++} ++ ++static void spacemit_sw_rx_tuning_prepare(struct sdhci_host *host, u8 dline_reg) ++{ ++ struct mmc_host *mmc = host->mmc; ++ struct mmc_ios ios = mmc->ios; ++ u32 reg; ++ ++ reg = sdhci_readl(host, SDHC_DLINE_CFG_REG); ++ reg &= ~(RX_DLINE_REG_MASK << RX_DLINE_REG_SHIFT); ++ reg |= dline_reg << RX_DLINE_REG_SHIFT; ++ reg &= ~(RX_DLINE_GAIN_MASK << RX_DLINE_GAIN_SHIFT); ++ if ((ios.timing == MMC_TIMING_UHS_SDR50) && (reg & 0x40)) ++ reg |= RX_DLINE_GAIN << RX_DLINE_GAIN_SHIFT; ++ sdhci_writel(host, reg, SDHC_DLINE_CFG_REG); ++ ++ reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG); ++ reg |= DLINE_PU; ++ sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG); ++ udelay(5); ++ ++ reg = sdhci_readl(host, SDHC_RX_CFG_REG); ++ reg &= ~(RX_SDCLK_SEL1_MASK << RX_SDCLK_SEL1_SHIFT); ++ reg |= RX_SDCLK_SEL1 << RX_SDCLK_SEL1_SHIFT; ++ sdhci_writel(host, reg, SDHC_RX_CFG_REG); ++ ++ if ((mmc->ios.timing == MMC_TIMING_MMC_HS200) ++ && !(host->quirks2 & SDHCI_QUIRK2_BROKEN_PHY_MODULE)) { ++ reg = sdhci_readl(host, SDHC_PHY_FUNC_REG); ++ reg |= HS200_USE_RFIFO; ++ sdhci_writel(host, reg, SDHC_PHY_FUNC_REG); ++ } ++} ++ ++static void spacemit_sw_rx_set_delaycode(struct sdhci_host *host, u32 delay) ++{ ++ u32 reg; ++ ++ reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG); ++ reg &= ~(RX_DLINE_CODE_MASK << RX_DLINE_CODE_SHIFT); ++ reg |= (delay & RX_DLINE_CODE_MASK) << RX_DLINE_CODE_SHIFT; ++ sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG); ++} ++ ++static void spacemit_sw_tx_tuning_prepare(struct sdhci_host *host) ++{ ++ u32 reg; ++ ++ /* set TX_MUX_SEL */ ++ reg = sdhci_readl(host, SDHC_TX_CFG_REG); ++ reg |= TX_MUX_SEL; ++ sdhci_writel(host, reg, SDHC_TX_CFG_REG); ++ ++ reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG); ++ reg |= DLINE_PU; ++ sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG); ++ udelay(5); ++} ++ ++static void spacemit_sw_tx_set_dlinereg(struct sdhci_host *host, u8 dline_reg) ++{ ++ u32 reg; ++ ++ reg = sdhci_readl(host, SDHC_DLINE_CFG_REG); ++ reg &= ~(TX_DLINE_REG_MASK << TX_DLINE_REG_SHIFT); ++ reg |= dline_reg << TX_DLINE_REG_SHIFT; ++ sdhci_writel(host, reg, SDHC_DLINE_CFG_REG); ++} ++ ++static void spacemit_sw_tx_set_delaycode(struct sdhci_host *host, u32 delay) ++{ ++ u32 reg; ++ ++ reg = sdhci_readl(host, SDHC_DLINE_CTRL_REG); ++ reg &= ~(TX_DLINE_CODE_MASK << TX_DLINE_CODE_SHIFT); ++ reg |= (delay & TX_DLINE_CODE_MASK) << TX_DLINE_CODE_SHIFT; ++ sdhci_writel(host, reg, SDHC_DLINE_CTRL_REG); ++} ++ ++static void spacemit_sdhci_clear_set_irqs(struct sdhci_host *host, u32 clr, u32 set) ++{ ++ u32 ier; ++ ++ ier = sdhci_readl(host, SDHCI_INT_ENABLE); ++ ier &= ~clr; ++ ier |= set; ++ sdhci_writel(host, ier, SDHCI_INT_ENABLE); ++ sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); ++} ++ ++static int spacemit_tuning_patten_check(struct sdhci_host *host, int point) ++{ ++ u32 read_patten; ++ unsigned int i; ++ u32 *tuning_patten; ++ int patten_len; ++ int err = 0; ++ ++ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) { ++ tuning_patten = (u32 *)tuning_patten8; ++ patten_len = ARRAY_SIZE(tuning_patten8); ++ } else { ++ tuning_patten = (u32 *)tuning_patten4; ++ patten_len = ARRAY_SIZE(tuning_patten4); ++ } ++ ++ for (i = 0; i < patten_len; i++) { ++ read_patten = sdhci_readl(host, SDHCI_BUFFER); ++ if (read_patten != tuning_patten[i]) ++ err++; ++ } ++ ++ return err; ++} ++ ++static int spacemit_send_tuning_cmd(struct sdhci_host *host, u32 
opcode, ++ int point, unsigned long flags) ++{ ++ int err = 0; ++ ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ sdhci_send_tuning(host, opcode); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ if (!host->tuning_done) { ++ pr_err("%s: Timeout waiting for Buffer Read Ready interrupt\n", ++ mmc_hostname(host->mmc)); ++ sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); ++ } else ++ err = spacemit_tuning_patten_check(host, point); ++ ++ host->tuning_done = 0; ++ return err; ++} ++ ++static int spacemit_sw_rx_select_window(struct sdhci_host *host, u32 opcode) ++{ ++ int min; ++ int max; ++ u16 ctrl; ++ u32 ier; ++ unsigned long flags = 0; ++ int err = 0; ++ int i, j, len; ++ struct tuning_window tmp; ++ struct mmc_host *mmc = host->mmc; ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ struct rx_tuning *rxtuning = &pdata->rxtuning; ++ ++ /* change to pio mode during the tuning stage */ ++ spin_lock_irqsave(&host->lock, flags); ++ ier = sdhci_readl(host, SDHCI_INT_ENABLE); ++ spacemit_sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL); ++ ++ min = SDHC_RX_TUNE_DELAY_MIN; ++ do { ++ /* find the mininum delay first which can pass tuning */ ++ while (min < SDHC_RX_TUNE_DELAY_MAX) { ++ spacemit_sw_rx_set_delaycode(host, min); ++ if (!mmc->ops->get_cd(mmc)) { ++ spin_unlock_irqrestore(&host->lock, flags); ++ return -ENODEV; ++ } ++ err = spacemit_send_tuning_cmd(host, opcode, min, flags); ++ if (err == -EIO) { ++ spin_unlock_irqrestore(&host->lock, flags); ++ return -EIO; ++ } ++ if (!err) ++ break; ++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ ctrl &= ~(SDHCI_CTRL_TUNED_CLK | SDHCI_CTRL_EXEC_TUNING); ++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); ++ min += SDHC_RX_TUNE_DELAY_STEP; ++ } ++ ++ /* find the maxinum delay which can not pass tuning */ ++ max = min + SDHC_RX_TUNE_DELAY_STEP; ++ while (max < SDHC_RX_TUNE_DELAY_MAX) { ++ spacemit_sw_rx_set_delaycode(host, max); ++ if (!mmc->ops->get_cd(mmc)) { ++ spin_unlock_irqrestore(&host->lock, flags); ++ return -ENODEV; ++ } ++ err = spacemit_send_tuning_cmd(host, opcode, max, flags); ++ if (err) { ++ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); ++ ctrl &= ~(SDHCI_CTRL_TUNED_CLK | SDHCI_CTRL_EXEC_TUNING); ++ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); ++ if (err == -EIO) { ++ spin_unlock_irqrestore(&host->lock, flags); ++ return -EIO; ++ } ++ break; ++ } ++ max += SDHC_RX_TUNE_DELAY_STEP; ++ } ++ ++ /* store the top 3 window */ ++ if ((max - min) >= rxtuning->window_limit) { ++ tmp.max_delay = max; ++ tmp.min_delay = min; ++ tmp.type = pdata->rxtuning.window_type; ++ for (i = 0; i < CANDIDATE_WIN_NUM; i++) { ++ len = rxtuning->windows[i].max_delay - ++ rxtuning->windows[i].min_delay; ++ if ((tmp.max_delay - tmp.min_delay) > len) { ++ for (j = CANDIDATE_WIN_NUM - 1; j > i; j--) ++ rxtuning->windows[j] = rxtuning->windows[j-1]; ++ ++ rxtuning->windows[i] = tmp; ++ break; ++ } ++ } ++ } ++ min = max + SDHC_RX_TUNE_DELAY_STEP; ++ } while (min < SDHC_RX_TUNE_DELAY_MAX); ++ ++ spacemit_sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); ++ spin_unlock_irqrestore(&host->lock, flags); ++ return 0; ++} ++ ++static int spacemit_sw_rx_select_delay(struct sdhci_host *host) ++{ ++ int i; ++ int win_len, min, max, mid; ++ struct tuning_window *window; ++ ++ struct mmc_host *mmc = host->mmc; ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ struct rx_tuning *tuning = &pdata->rxtuning; ++ ++ for (i = 0; i < CANDIDATE_WIN_NUM; i++) { ++ window = &tuning->windows[i]; ++ min = window->min_delay; ++ max = 
window->max_delay; ++ mid = (min + max - 1) / 2; ++ win_len = max - min; ++ if (win_len < tuning->window_limit) ++ continue; ++ ++ if (window->type == LEFT_WINDOW) { ++ tuning->select_delay[tuning->select_delay_num++] = min + win_len / 4; ++ tuning->select_delay[tuning->select_delay_num++] = min + win_len / 3; ++ } else if (window->type == RIGHT_WINDOW) { ++ tuning->select_delay[tuning->select_delay_num++] = max - win_len / 4; ++ tuning->select_delay[tuning->select_delay_num++] = max - win_len / 3; ++ } else { ++ tuning->select_delay[tuning->select_delay_num++] = mid; ++ tuning->select_delay[tuning->select_delay_num++] = mid + win_len / 4; ++ tuning->select_delay[tuning->select_delay_num++] = mid - win_len / 4; ++ } ++ } ++ ++ return tuning->select_delay_num; ++} ++ ++static void spacemit_sw_rx_card_store(struct sdhci_host *host, struct rx_tuning *tuning) ++{ ++ struct mmc_card *card = host->mmc->card; ++ ++ if (card) ++ memcpy(tuning->card_cid, card->raw_cid, sizeof(card->raw_cid)); ++} ++ ++static int spacemit_sw_rx_card_pretuned(struct sdhci_host *host, struct rx_tuning *tuning) ++{ ++ struct mmc_card *card = host->mmc->card; ++ ++ if (!card) ++ return 0; ++ ++ return !memcmp(tuning->card_cid, card->raw_cid, sizeof(card->raw_cid)); ++} ++ ++static int spacemit_sdhci_execute_sw_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ int ret; ++ int index; ++ struct mmc_host *mmc = host->mmc; ++ struct mmc_ios ios = mmc->ios; ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ struct rx_tuning *rxtuning = &pdata->rxtuning; ++ ++ /* ++ * Tuning is required for SDR50/SDR104, HS200/HS400 cards and ++ * if clock frequency is greater than 100MHz in these modes. ++ */ ++ if (host->clock < 100 * 1000 * 1000 || ++ !((ios.timing == MMC_TIMING_MMC_HS200) || ++ (ios.timing == MMC_TIMING_UHS_SDR50) || ++ (ios.timing == MMC_TIMING_UHS_SDR104))) ++ return 0; ++ ++ if (!(mmc->caps2 & MMC_CAP2_NO_SD) && !mmc->ops->get_cd(mmc)) ++ return 0; ++ ++ /* TX tuning config */ ++ if ((host->mmc->caps2 & MMC_CAP2_NO_MMC) || ++ (host->quirks2 & SDHCI_QUIRK2_BROKEN_PHY_MODULE)) { ++ spacemit_sw_tx_set_dlinereg(host, pdata->tx_dline_reg); ++ spacemit_sw_tx_set_delaycode(host, pdata->tx_delaycode); ++ spacemit_sw_tx_tuning_prepare(host); ++ } ++ ++ /* step 1: check pretuned card */ ++ if (spacemit_sw_rx_card_pretuned(host, rxtuning) && ++ rxtuning->select_delay_num) { ++ index = rxtuning->current_delay_index; ++ if (mmc->doing_retune) ++ index++; ++ if (index == rxtuning->select_delay_num) { ++ pr_warn("%s: all select delay failed, re-init to DDR50\n", ++ mmc_hostname(mmc)); ++ rxtuning->select_delay_num = 0; ++ rxtuning->current_delay_index = 0; ++ memset(rxtuning->windows, 0, sizeof(rxtuning->windows)); ++ memset(rxtuning->select_delay, 0xFF, sizeof(rxtuning->select_delay)); ++ memset(rxtuning->card_cid, 0, sizeof(rxtuning->card_cid)); ++ rxtuning->tuning_fail = 1; ++ return -EIO; ++ } ++ ++ spacemit_sw_rx_tuning_prepare(host, rxtuning->rx_dline_reg); ++ spacemit_sw_rx_set_delaycode(host, rxtuning->select_delay[index]); ++ rxtuning->current_delay_index = index; ++ return 0; ++ } ++ ++ rxtuning->select_delay_num = 0; ++ rxtuning->current_delay_index = 0; ++ memset(rxtuning->windows, 0, sizeof(rxtuning->windows)); ++ memset(rxtuning->select_delay, 0xFF, sizeof(rxtuning->select_delay)); ++ memset(rxtuning->card_cid, 0, sizeof(rxtuning->card_cid)); ++ ++ /* step 2: get pass window and calculate the select_delay */ ++ spacemit_sw_rx_tuning_prepare(host, rxtuning->rx_dline_reg); ++ ret = 
spacemit_sw_rx_select_window(host, opcode); ++ if (ret) { ++ pr_warn("%s: abort tuning, err:%d\n", mmc_hostname(mmc), ret); ++ rxtuning->tuning_fail = 1; ++ return -EIO; ++ } ++ ++ if (!spacemit_sw_rx_select_delay(host)) { ++ pr_warn("%s: fail to get delaycode\n", mmc_hostname(mmc)); ++ rxtuning->tuning_fail = 1; ++ return -EIO; ++ } ++ ++ /* step 3: set the delay code and store card cid */ ++ spacemit_sw_rx_set_delaycode(host, rxtuning->select_delay[0]); ++ spacemit_sw_rx_card_store(host, rxtuning); ++ rxtuning->tuning_fail = 0; ++ ++ return 0; ++} ++ ++static unsigned int spacemit_sdhci_clk_get_max_clock(struct sdhci_host *host) ++{ ++ unsigned long rate; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ ++ rate = clk_get_rate(pltfm_host->clk); ++ return rate; ++} ++ ++static unsigned int spacemit_get_max_timeout_count(struct sdhci_host *host) ++{ ++ /* ++ * the default sdhci code use the 1 << 27 as the max timeout counter ++ * to calculate the max_busy_timeout. ++ * aquilac sdhci support 1 << 29 as the timeout counter. ++ */ ++ return 1 << 29; ++} ++ ++static int spacemit_sdhci_pre_select_hs400(struct mmc_host *mmc) ++{ ++ u32 reg; ++ struct sdhci_host *host = mmc_priv(mmc); ++ ++ reg = sdhci_readl(host, SDHC_MMC_CTRL_REG); ++ reg |= MMC_HS400; ++ sdhci_writel(host, reg, SDHC_MMC_CTRL_REG); ++ host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; ++ ++ return 0; ++} ++ ++static void spacemit_sdhci_post_select_hs400(struct mmc_host *mmc) ++{ ++ struct sdhci_host *host = mmc_priv(mmc); ++ ++ spacemit_sdhci_phy_dll_init(host); ++ host->mmc->caps &= ~MMC_CAP_WAIT_WHILE_BUSY; ++} ++ ++static void spacemit_sdhci_pre_hs400_to_hs200(struct mmc_host *mmc) ++{ ++ u32 reg; ++ struct sdhci_host *host = mmc_priv(mmc); ++ ++ reg = sdhci_readl(host, SDHC_PHY_CTRL_REG); ++ reg &= ~(PHY_FUNC_EN | PHY_PLL_LOCK); ++ sdhci_writel(host, reg, SDHC_PHY_CTRL_REG); ++ ++ reg = sdhci_readl(host, SDHC_MMC_CTRL_REG); ++ reg &= ~(MMC_HS400 | MMC_HS200 | ENHANCE_STROBE_EN); ++ sdhci_writel(host, reg, SDHC_MMC_CTRL_REG); ++ ++ reg = sdhci_readl(host, SDHC_PHY_FUNC_REG); ++ reg &= ~HS200_USE_RFIFO; ++ sdhci_writel(host, reg, SDHC_PHY_FUNC_REG); ++ ++ udelay(5); ++ ++ reg = sdhci_readl(host, SDHC_PHY_CTRL_REG); ++ reg |= (PHY_FUNC_EN | PHY_PLL_LOCK); ++ sdhci_writel(host, reg, SDHC_PHY_CTRL_REG); ++} ++ ++static void spacemit_sdhci_request_done(struct sdhci_host *host, struct mmc_request *mrq) ++{ ++ struct mmc_host *mmc = host->mmc; ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ ++ mmc_request_done(host->mmc, mrq); ++ ++ if (!(host->mmc->caps2 & MMC_CAP2_NO_SDIO)) { ++ atomic_dec(&pdata->ref_count); ++ wake_up(&pdata->wait_queue); ++ } ++} ++ ++static const struct sdhci_ops spacemit_sdhci_ops = { ++ .set_clock = spacemit_sdhci_set_clock, ++ .platform_send_init_74_clocks = spacemit_sdhci_gen_init_74_clocks, ++ .get_max_clock = spacemit_sdhci_clk_get_max_clock, ++ .get_max_timeout_count = spacemit_get_max_timeout_count, ++ .set_bus_width = sdhci_set_bus_width, ++ .reset = spacemit_sdhci_reset, ++ .set_uhs_signaling = spacemit_sdhci_set_uhs_signaling, ++ .voltage_switch = spacemit_sdhci_voltage_switch, ++ .platform_execute_tuning = spacemit_sdhci_execute_sw_tuning, ++ .irq = spacemit_handle_interrupt, ++ .set_power = sdhci_set_power_and_bus_voltage, ++ .request_done = spacemit_sdhci_request_done, ++}; ++ ++static struct sdhci_pltfm_data sdhci_k1_pdata = { ++ .ops = &spacemit_sdhci_ops, ++ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK ++ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC ++ | 
SDHCI_QUIRK_32BIT_ADMA_SIZE ++ | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, ++ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA, ++}; ++ ++static const struct of_device_id sdhci_spacemit_of_match[] = { ++ { ++ .compatible = "spacemit,k1-sdhci", ++ }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, sdhci_spacemit_of_match); ++ ++static struct k1_sdhci_platdata *spacemit_get_mmc_pdata(struct device *dev) ++{ ++ struct k1_sdhci_platdata *pdata; ++ ++ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); ++ if (!pdata) ++ return NULL; ++ return pdata; ++} ++ ++static void spacemit_get_of_property(struct sdhci_host *host, struct device *dev, ++ struct k1_sdhci_platdata *pdata) ++{ ++ struct device_node *np = dev->of_node; ++ u32 property; ++ int ret; ++ ++ /* sdh io clk */ ++ if (!of_property_read_u32(np, "spacemit,sdh-freq", &property)) ++ pdata->host_freq = property; ++ ++ if (!of_property_read_u32(np, "spacemit,sdh-flags", &property)) ++ pdata->flags |= property; ++ ++ if (!of_property_read_u32(np, "spacemit,sdh-host-caps", &property)) ++ pdata->host_caps |= property; ++ if (!of_property_read_u32(np, "spacemit,sdh-host-caps2", &property)) ++ pdata->host_caps2 |= property; ++ ++ if (!of_property_read_u32(np, "spacemit,sdh-host-caps-disable", &property)) ++ pdata->host_caps_disable |= property; ++ if (!of_property_read_u32(np, "spacemit,sdh-host-caps2-disable", &property)) ++ pdata->host_caps2_disable |= property; ++ ++ if (!of_property_read_u32(np, "spacemit,sdh-quirks", &property)) ++ pdata->quirks |= property; ++ if (!of_property_read_u32(np, "spacemit,sdh-quirks2", &property)) ++ pdata->quirks2 |= property; ++ ++ if (!of_property_read_u32(np, "spacemit,aib_mmc1_io_reg", &property)) ++ pdata->aib_mmc1_io_reg = property; ++ else ++ pdata->aib_mmc1_io_reg = 0x0; ++ ++ if (!of_property_read_u32(np, "spacemit,apbc_asfar_reg", &property)) ++ pdata->apbc_asfar_reg = property; ++ else ++ pdata->apbc_asfar_reg = 0x0; ++ ++ if (!of_property_read_u32(np, "spacemit,apbc_assar_reg", &property)) ++ pdata->apbc_assar_reg = property; ++ else ++ pdata->apbc_assar_reg = 0x0; ++ ++ /* read rx tuning dline_reg */ ++ if (!of_property_read_u32(np, "spacemit,rx_dline_reg", &property)) ++ pdata->rxtuning.rx_dline_reg = (u8)property; ++ else ++ pdata->rxtuning.rx_dline_reg = RX_TUNING_DLINE_REG; ++ ++ /* read rx tuning window limit */ ++ if (!of_property_read_u32(np, "spacemit,rx_tuning_limit", &property)) ++ pdata->rxtuning.window_limit = (u8)property; ++ else ++ pdata->rxtuning.window_limit = RX_TUNING_WINDOW_THRESHOLD; ++ ++ /* read rx tuning window type */ ++ if (!of_property_read_u32(np, "spacemit,rx_tuning_type", &property)) ++ pdata->rxtuning.window_type = (u8)property; ++ else ++ pdata->rxtuning.window_type = MIDDLE_WINDOW; ++ ++ /* tx tuning dline_reg */ ++ if (!of_property_read_u32(np, "spacemit,tx_dline_reg", &property)) ++ pdata->tx_dline_reg = (u8)property; ++ else ++ pdata->tx_dline_reg = TX_TUNING_DLINE_REG; ++ ++ ret = of_property_read_variable_u32_array(np, "spacemit,tx_delaycode", ++ pdata->tx_delaycode_array, 1, 2); ++ if (ret > 0) ++ pdata->tx_delaycode_cnt = (u8)ret; ++ else { ++ pdata->tx_delaycode_array[0] = TX_TUNING_DELAYCODE; ++ pdata->tx_delaycode_cnt = 1; ++ } ++ pdata->tx_delaycode = pdata->tx_delaycode_array[0]; ++ ++ /* phy driver select */ ++ if (!of_property_read_u32(np, "spacemit,phy_driver_sel", &property)) ++ pdata->phy_driver_sel = (u8)property; ++ else ++ pdata->phy_driver_sel = PHY_DRIVE_SEL_DEFAULT; ++ ++ /* read rx tuning cpufreq, unit 1000Hz */ ++ if (!of_property_read_u32(np, 
"spacemit,rx_tuning_freq", &property)) ++ pdata->rx_tuning_freq = property; ++} ++ ++ssize_t sdhci_tx_delaycode_show(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct sdhci_host *host = dev_get_drvdata(dev); ++ struct mmc_host *mmc = host->mmc; ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ ++ return sprintf(buf, "0x%02x\n", pdata->tx_delaycode); ++} ++ ++ssize_t sdhci_tx_delaycode_set(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct sdhci_host *host = dev_get_drvdata(dev); ++ struct mmc_host *mmc = host->mmc; ++ struct k1_sdhci_platdata *pdata = mmc->parent->platform_data; ++ u8 delaycode; ++ ++ if (kstrtou8(buf, 0, &delaycode)) ++ return -EINVAL; ++ ++ pdata->tx_delaycode = delaycode; ++ return count; ++} ++ ++static int spacemit_sdhci_probe(struct platform_device *pdev) ++{ ++ struct sdhci_pltfm_host *pltfm_host; ++ struct device *dev = &pdev->dev; ++ struct sdhci_host *host; ++ const struct of_device_id *match; ++ struct sdhci_spacemit *spacemit; ++ struct k1_sdhci_platdata *pdata; ++ int ret; ++ ++ host = sdhci_pltfm_init(pdev, &sdhci_k1_pdata, sizeof(*spacemit)); ++ if (IS_ERR(host)) ++ return PTR_ERR(host); ++ ++ pltfm_host = sdhci_priv(host); ++ spacemit = sdhci_pltfm_priv(pltfm_host); ++ spacemit->clk_io = devm_clk_get(dev, "sdh-io"); ++ if (IS_ERR(spacemit->clk_io)) ++ spacemit->clk_io = devm_clk_get(dev, NULL); ++ if (IS_ERR(spacemit->clk_io)) { ++ dev_err(dev, "failed to get io clock\n"); ++ ret = PTR_ERR(spacemit->clk_io); ++ goto err_clk_get; ++ } ++ pltfm_host->clk = spacemit->clk_io; ++ clk_prepare_enable(spacemit->clk_io); ++ ++ spacemit->clk_core = devm_clk_get(dev, "sdh-core"); ++ if (!IS_ERR(spacemit->clk_core)) ++ clk_prepare_enable(spacemit->clk_core); ++ ++ spacemit->clk_aib = devm_clk_get(dev, "aib-clk"); ++ if (!IS_ERR(spacemit->clk_aib)) ++ clk_prepare_enable(spacemit->clk_aib); ++ ++ spacemit->reset = devm_reset_control_array_get_optional_shared(dev); ++ if (IS_ERR(spacemit->reset)) { ++ dev_err(dev, "failed to get reset control\n"); ++ ret = PTR_ERR(spacemit->reset); ++ goto err_rst_get; ++ } ++ ++ ret = reset_control_deassert(spacemit->reset); ++ if (ret) ++ goto err_rst_get; ++ ++ match = of_match_device(of_match_ptr(sdhci_spacemit_of_match), &pdev->dev); ++ if (match) { ++ ret = mmc_of_parse(host->mmc); ++ if (ret) ++ goto err_of_parse; ++ sdhci_get_of_property(pdev); ++ } ++ ++ pdata = pdev->dev.platform_data ? 
pdev->dev.platform_data : spacemit_get_mmc_pdata(dev); ++ if (IS_ERR_OR_NULL(pdata)) ++ goto err_of_parse; ++ ++ spacemit_get_of_property(host, dev, pdata); ++ if (pdata->quirks) ++ host->quirks |= pdata->quirks; ++ if (pdata->quirks2) ++ host->quirks2 |= pdata->quirks2; ++ if (pdata->host_caps) ++ host->mmc->caps |= pdata->host_caps; ++ if (pdata->host_caps2) ++ host->mmc->caps2 |= pdata->host_caps2; ++ if (pdata->pm_caps) ++ host->mmc->pm_caps |= pdata->pm_caps; ++ pdev->dev.platform_data = pdata; ++ ++ if (host->mmc->pm_caps) ++ host->mmc->pm_flags |= host->mmc->pm_caps; ++ ++ if (!(host->mmc->caps2 & MMC_CAP2_NO_MMC)) { ++ host->mmc_host_ops.hs400_prepare_ddr = spacemit_sdhci_pre_select_hs400; ++ host->mmc_host_ops.hs400_complete = spacemit_sdhci_post_select_hs400; ++ host->mmc_host_ops.hs400_downgrade = spacemit_sdhci_pre_hs400_to_hs200; ++ if (host->mmc->caps2 & MMC_CAP2_HS400_ES) ++ host->mmc_host_ops.hs400_enhanced_strobe = spacemit_sdhci_hs400_enhanced_strobe; ++ } ++ ++ host->mmc_host_ops.start_signal_voltage_switch = spacemit_sdhci_start_signal_voltage_switch; ++ host->mmc_host_ops.card_busy = spacemit_sdhci_card_busy; ++ host->mmc_host_ops.init_card = spacemit_init_card_quriks; ++ host->mmc_host_ops.enable_sdio_irq = spacemit_enable_sdio_irq; ++ host->mmc_host_ops.request = spacemit_sdhci_request; ++ ++ /* skip auto rescan */ ++ if (!(host->mmc->caps2 & MMC_CAP2_NO_SDIO)) ++ host->mmc->rescan_entered = 1; ++ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; ++ ++ pm_runtime_get_noresume(&pdev->dev); ++ pm_runtime_set_active(&pdev->dev); ++ pm_runtime_set_autosuspend_delay(&pdev->dev, RPM_DELAY); ++ pm_runtime_use_autosuspend(&pdev->dev); ++ pm_runtime_enable(&pdev->dev); ++ pm_suspend_ignore_children(&pdev->dev, 1); ++ pm_runtime_get_sync(&pdev->dev); ++ ++ /* set io clock rate */ ++ if (pdata->host_freq) { ++ ret = clk_set_rate(spacemit->clk_io, pdata->host_freq); ++ if (ret) { ++ dev_err(dev, "failed to set io clock freq\n"); ++ goto err_host_freq; ++ } ++ } else { ++ dev_err(dev, "failed to get io clock freq\n"); ++ goto err_host_freq; ++ } ++ ++ init_waitqueue_head(&pdata->wait_queue); ++ atomic_set(&pdata->ref_count, 0); ++ ++ ret = sdhci_add_host(host); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to add spacemit sdhc.\n"); ++ goto err_host_freq; ++ } else if (!(host->mmc->caps2 & MMC_CAP2_NO_SDIO)) { ++ sdio_host = host; ++ } ++ ++ spacemit_sdhci_caps_disable(host); ++ ++ if ((host->mmc->caps2 & MMC_CAP2_NO_MMC) || ++ (host->quirks2 & SDHCI_QUIRK2_BROKEN_PHY_MODULE)) ++ spacemit->pinctrl = devm_pinctrl_get(&pdev->dev); ++ ++ if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) ++ device_init_wakeup(&pdev->dev, 1); ++ ++ pm_runtime_put_autosuspend(&pdev->dev); ++ return 0; ++ ++err_host_freq: ++ pm_runtime_disable(&pdev->dev); ++ pm_runtime_put_noidle(&pdev->dev); ++err_of_parse: ++ reset_control_assert(spacemit->reset); ++err_rst_get: ++ if (!IS_ERR(spacemit->clk_aib)) ++ clk_disable_unprepare(spacemit->clk_aib); ++ clk_disable_unprepare(spacemit->clk_io); ++ clk_disable_unprepare(spacemit->clk_core); ++err_clk_get: ++ sdhci_pltfm_free(pdev); ++ return ret; ++} ++ ++static void spacemit_sdhci_remove(struct platform_device *pdev) ++{ ++ struct sdhci_host *host = platform_get_drvdata(pdev); ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_spacemit *spacemit = sdhci_pltfm_priv(pltfm_host); ++ ++ pm_runtime_get_sync(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); ++ pm_runtime_put_noidle(&pdev->dev); ++ sdhci_remove_host(host, 1); ++ ++ 
reset_control_assert(spacemit->reset); ++ if (!IS_ERR(spacemit->clk_aib)) ++ clk_disable_unprepare(spacemit->clk_aib); ++ clk_disable_unprepare(spacemit->clk_io); ++ clk_disable_unprepare(spacemit->clk_core); ++ ++ sdhci_pltfm_free(pdev); ++} ++ ++static struct platform_driver spacemit_sdhci_driver = { ++ .driver = { ++ .name = "sdhci-spacemit", ++ .of_match_table = of_match_ptr(sdhci_spacemit_of_match), ++ }, ++ .probe = spacemit_sdhci_probe, ++ .remove_new = spacemit_sdhci_remove, ++}; ++ ++module_platform_driver(spacemit_sdhci_driver); ++ ++MODULE_DESCRIPTION("SDHCI platform driver for Spacemit"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-sophgo.c b/drivers/mmc/host/sdhci-sophgo.c new file mode 100644 index 000000000000..8200ccaa68f6 @@ -356190,496 +390630,4158 @@ index d57ddaf1525b..c60656702063 100644 }; const struct spi_nor_manufacturer spi_nor_gigadevice = { +diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig +index f18cd4a57826..6153dacb0482 100644 +--- a/drivers/net/ethernet/Kconfig ++++ b/drivers/net/ethernet/Kconfig +@@ -198,6 +198,7 @@ source "drivers/net/ethernet/wiznet/Kconfig" + source "drivers/net/ethernet/xilinx/Kconfig" + source "drivers/net/ethernet/xircom/Kconfig" + source "drivers/net/ethernet/bzwx/Kconfig" ++source "drivers/net/ethernet/spacemit/Kconfig" + + source "drivers/net/ethernet/nebula-matrix/Kconfig" + +diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile +index e3d01399793d..23783cb761c4 100644 +--- a/drivers/net/ethernet/Makefile ++++ b/drivers/net/ethernet/Makefile +@@ -111,3 +111,4 @@ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ + obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ + obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/ + obj-$(CONFIG_NET_VENDOR_NEBULA_MATRIX) += nebula-matrix/ ++obj-$(CONFIG_NET_VENDOR_SPACEMIT) += spacemit/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c -index 4d7caa119971..0a9efd8c6471 100644 +index 5d46a8e5376d..683ac5b5ecd9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c -@@ -3213,7 +3213,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + #include "i40e_adminq_cmd.h" + #include "i40e_devids.h" + #include "i40e_prototype.h" +@@ -3214,7 +3215,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->base_queue = phys_id; break; case I40E_AQ_CAP_ID_MSIX: - p->num_msix_vectors = number; -+ //p->num_msix_vectors = number; -+ p->num_msix_vectors = 8; ++ if (of_machine_is_compatible("sophgo,mango")) ++ p->num_msix_vectors = 8; ++ else ++ p->num_msix_vectors = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MSIX vector count = %d\n", p->num_msix_vectors); -diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -index c24a72d1e273..ca8b8d023a8f 100644 ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -@@ -2069,7 +2069,7 @@ enum { - #define IXGBE_DEVICE_CAPS 0x2C - #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 - #define IXGBE_PCIE_MSIX_82599_CAPS 0x72 --#define IXGBE_MAX_MSIX_VECTORS_82599 0x40 -+#define IXGBE_MAX_MSIX_VECTORS_82599 0x09 - #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 - #define IXGBE_MAX_MSIX_VECTORS_82598 0x13 - -diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig -index 
92d7d5a00b84..3db07c331c4b 100644 ---- a/drivers/net/ethernet/stmicro/stmmac/Kconfig -+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig -@@ -216,6 +216,24 @@ config DWMAC_SUN8I - stmmac device driver. This driver is used for H3/A83T/A64 - EMAC ethernet controller. +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index 2e6e0365154a..1875792856f7 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include -+config DWMAC_XUANTIE -+ tristate "XuanTie dwmac support" -+ depends on OF && (ARCH_XUANTIE || COMPILE_TEST) -+ select MFD_SYSCON + #include "ixgbe.h" + #include "ixgbe_common.h" +@@ -2916,7 +2917,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; +- max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; ++ if (of_machine_is_compatible("sophgo,mango")) ++ max_msix_count = 9; ++ else ++ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; + default: + return 1; +diff --git a/drivers/net/ethernet/spacemit/Kconfig b/drivers/net/ethernet/spacemit/Kconfig +new file mode 100644 +index 000000000000..24893385e6bd +--- /dev/null ++++ b/drivers/net/ethernet/spacemit/Kconfig +@@ -0,0 +1,24 @@ ++config NET_VENDOR_SPACEMIT ++ bool "Spacemit devices" ++ default y ++ depends on SOC_SPACEMIT + help -+ Support for ethernet controllers on XuanTie RISC-V SoCs ++ If you have a network (Ethernet) chipset belonging to this class, ++ say Y. + -+ This selects the XuanTie platform specific glue layer support for -+ the stmmac device driver. This driver is used for XuanTie TH1520 -+ ethernet controller. ++ Note that the answer to this question does not directly affect ++ the kernel: saying N will just cause the configurator to skip all ++ the questions regarding Spacemit chipsets. If you say Y, you will ++ be asked for your specific chipset/driver in the following questions. + -+config DWMAC_SOPHGO -+ tristate "SOPHGO SG2042 GMAC support" -+ default ARCH_SOPHGO -+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST) ++if NET_VENDOR_SPACEMIT ++ ++config K1_EMAC ++ tristate "k1 Emac Driver" ++ depends on SOC_SPACEMIT_K1 ++ select PHYLIB + help -+ BM-ethernet for sg2042 ++ This Driver support Spacemit k1 Ethernet MAC ++ Say Y to enable support for the Spacemit Ethernet. 
+ - config DWMAC_IMX8 - tristate "NXP IMX8 DWMAC support" - default ARCH_MXC -diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile -index 5b57aee19267..21ff7179dddc 100644 ---- a/drivers/net/ethernet/stmicro/stmmac/Makefile -+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile -@@ -22,11 +22,13 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o - obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o - obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o - obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o -+obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o - obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o - obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o - obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o - obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o - obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o -+obj-$(CONFIG_DWMAC_XUANTIE) += dwmac-xuantie.o - obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o - obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o - obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o -diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c ++endif # NET_VENDOR_SPACEMIT +diff --git a/drivers/net/ethernet/spacemit/Makefile b/drivers/net/ethernet/spacemit/Makefile new file mode 100644 -index 000000000000..50a76c8f0df6 +index 000000000000..7a14d80d02f1 --- /dev/null -+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c -@@ -0,0 +1,268 @@ ++++ b/drivers/net/ethernet/spacemit/Makefile +@@ -0,0 +1,6 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Makefile for the Spacemit network device drivers. ++# ++ ++obj-$(CONFIG_K1_EMAC) += k1-emac.o +diff --git a/drivers/net/ethernet/spacemit/k1-emac.c b/drivers/net/ethernet/spacemit/k1-emac.c +new file mode 100644 +index 000000000000..d12660124ea7 +--- /dev/null ++++ b/drivers/net/ethernet/spacemit/k1-emac.c +@@ -0,0 +1,2739 @@ ++// SPDX-License-Identifier: GPL-2.0 +/* -+ * DWMAC specific glue layer -+ * -+ * Copyright (c) 2018 Bitmain Ltd. ++ * spacemit k1 emac driver + * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. ++ * Copyright (c) 2023, spacemit Corporation. + * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
+ */ + -+#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include +#include ++#include ++#include ++#include ++#include ++#include +#include ++#include +#include -+#include -+#include -+#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "k1-emac.h" + -+#include "stmmac_platform.h" ++#define DRIVER_NAME "k1_emac" + -+struct bm_mac { -+ struct device *dev; -+ struct reset_control *rst; -+ struct clk *clk_tx; -+ struct clk *gate_clk_tx; -+ struct clk *gate_clk_ref; -+ struct gpio_desc *reset; -+}; ++#define TUNING_CMD_LEN 50 ++#define CLK_PHASE_CNT 256 ++#define CLK_PHASE_REVERT 180 + -+static u64 bm_dma_mask = DMA_BIT_MASK(40); ++#define TXCLK_PHASE_DEFAULT 0 ++#define RXCLK_PHASE_DEFAULT 0 + -+static int bm_eth_reset_phy(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ int phy_reset_gpio; ++#define TX_PHASE 1 ++#define RX_PHASE 0 + -+ if (!np) -+ return 0; ++#define DEFAULT_TX_THRESHOLD (192) ++#define DEFAULT_RX_THRESHOLD (12) ++#define DEFAULT_TX_RING_NUM (128) ++#define DEFAULT_RX_RING_NUM (128) ++#define DEFAULT_DMA_BURST_LEN (1) ++#define HASH_TABLE_SIZE (64) + -+ phy_reset_gpio = of_get_named_gpio(np, "phy-reset-gpios", 0); ++#define EMAC_DMA_REG_CNT 16 ++#define EMAC_MAC_REG_CNT 124 ++#define EMAC_REG_SPACE_SIZE ((EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * 4) + -+ if (phy_reset_gpio < 0) -+ return 0; ++/* for ptp event message , udp port is 319 */ ++#define DEFAULT_UDP_PORT (0x13F) + -+ if (gpio_request(phy_reset_gpio, "eth-phy-reset")) -+ return 0; ++/* ptp ethernet type */ ++#define DEFAULT_ETH_TYPE (0x88F7) + -+ /* RESET_PU */ -+ gpio_direction_output(phy_reset_gpio, 0); -+ mdelay(100); ++#define EMAC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4) + -+ gpio_direction_output(phy_reset_gpio, 1); -+ /* RC charging time */ -+ mdelay(100); ++#define INCVALUE_100MHZ 10 ++#define INCVALUE_SHIFT_100MHZ 17 ++#define INCPERIOD_100MHZ 1 + -+ return 0; -+} ++#define EMAC_ETHTOOL_STAT(x) { #x, offsetof(struct emac_hw_stats, x) / sizeof(u32) } + -+static void bm_mac_fix_speed(void *priv, unsigned int speed, unsigned int mode) -+{ -+ struct bm_mac *bsp_priv = priv; -+ unsigned long rate = 125000000; -+ bool needs_calibration = false; -+ int err; ++/* strings used by ethtool */ ++static const struct emac_ethtool_stats { ++ char str[ETH_GSTRING_LEN]; ++ u32 offset; ++} emac_ethtool_stats[] = { ++ EMAC_ETHTOOL_STAT(tx_ok_pkts), ++ EMAC_ETHTOOL_STAT(tx_total_pkts), ++ EMAC_ETHTOOL_STAT(tx_ok_bytes), ++ EMAC_ETHTOOL_STAT(tx_err_pkts), ++ EMAC_ETHTOOL_STAT(tx_singleclsn_pkts), ++ EMAC_ETHTOOL_STAT(tx_multiclsn_pkts), ++ EMAC_ETHTOOL_STAT(tx_lateclsn_pkts), ++ EMAC_ETHTOOL_STAT(tx_excessclsn_pkts), ++ EMAC_ETHTOOL_STAT(tx_unicast_pkts), ++ EMAC_ETHTOOL_STAT(tx_multicast_pkts), ++ EMAC_ETHTOOL_STAT(tx_broadcast_pkts), ++ EMAC_ETHTOOL_STAT(tx_pause_pkts), ++ EMAC_ETHTOOL_STAT(rx_ok_pkts), ++ EMAC_ETHTOOL_STAT(rx_total_pkts), ++ EMAC_ETHTOOL_STAT(rx_crc_err_pkts), ++ EMAC_ETHTOOL_STAT(rx_align_err_pkts), ++ EMAC_ETHTOOL_STAT(rx_err_total_pkts), ++ EMAC_ETHTOOL_STAT(rx_ok_bytes), ++ EMAC_ETHTOOL_STAT(rx_total_bytes), ++ EMAC_ETHTOOL_STAT(rx_unicast_pkts), ++ EMAC_ETHTOOL_STAT(rx_multicast_pkts), ++ EMAC_ETHTOOL_STAT(rx_broadcast_pkts), ++ EMAC_ETHTOOL_STAT(rx_pause_pkts), ++ EMAC_ETHTOOL_STAT(rx_len_err_pkts), ++ EMAC_ETHTOOL_STAT(rx_len_undersize_pkts), ++ EMAC_ETHTOOL_STAT(rx_len_oversize_pkts), ++ 
EMAC_ETHTOOL_STAT(rx_len_fragment_pkts), ++ EMAC_ETHTOOL_STAT(rx_len_jabber_pkts), ++ EMAC_ETHTOOL_STAT(rx_64_pkts), ++ EMAC_ETHTOOL_STAT(rx_65_127_pkts), ++ EMAC_ETHTOOL_STAT(rx_128_255_pkts), ++ EMAC_ETHTOOL_STAT(rx_256_511_pkts), ++ EMAC_ETHTOOL_STAT(rx_512_1023_pkts), ++ EMAC_ETHTOOL_STAT(rx_1024_1518_pkts), ++ EMAC_ETHTOOL_STAT(rx_1519_plus_pkts), ++ EMAC_ETHTOOL_STAT(rx_drp_fifo_full_pkts), ++ EMAC_ETHTOOL_STAT(rx_truncate_fifo_full_pkts), ++}; ++ ++enum clk_tuning_way { ++ /* fpga clk tuning register */ ++ CLK_TUNING_BY_REG, ++ /* zebu/evb rgmii delayline register */ ++ CLK_TUNING_BY_DLINE, ++ /* evb rmii only revert tx/rx clock for clk tuning */ ++ CLK_TUNING_BY_CLK_REVERT, ++ CLK_TUNING_MAX, ++}; ++ ++static void emac_hw_timestamp_config(struct emac_priv *priv, u32 enable, u8 rx_ptp_type, u32 ptp_msg_id) ++{ ++ void __iomem *ioaddr = priv->iobase; ++ u32 val; + -+ switch (speed) { -+ case SPEED_1000: -+ needs_calibration = true; -+ rate = 125000000; -+ break; ++ if (enable) { ++ /* enable tx/rx timestamp and config rx ptp type */ ++ val = TX_TIMESTAMP_EN | RX_TIMESTAMP_EN; ++ val |= (rx_ptp_type << RX_PTP_PKT_TYPE_OFST) & RX_PTP_PKT_TYPE_MSK; ++ writel(val, ioaddr + PTP_1588_CTRL); + -+ case SPEED_100: -+ needs_calibration = true; -+ rate = 25000000; -+ break; ++ /* config ptp message id */ ++ writel(ptp_msg_id, ioaddr + PTP_MSG_ID); + -+ case SPEED_10: -+ needs_calibration = true; -+ rate = 2500000; -+ break; ++ /* config ptp ethernet type */ ++ writel(DEFAULT_ETH_TYPE, ioaddr + PTP_ETH_TYPE); + -+ default: -+ dev_err(bsp_priv->dev, "invalid speed %u\n", speed); -+ break; -+ } ++ /* config ptp udp port */ ++ writel(DEFAULT_UDP_PORT, ioaddr + PTP_UDP_PORT); + -+ if (needs_calibration) { -+ err = clk_set_rate(bsp_priv->clk_tx, rate); -+ if (err < 0) -+ dev_err(bsp_priv->dev, "failed to set TX rate: %d\n" -+ , err); ++ } else { ++ writel(0, ioaddr + PTP_1588_CTRL); + } +} + -+void bm_dwmac_exit(struct platform_device *pdev, void *priv) ++static u32 emac_hw_config_systime_increment(struct emac_priv *priv, u32 ptp_clock, u32 adj_clock) +{ -+ struct bm_mac *bsp_priv = priv; ++ void __iomem *ioaddr = priv->iobase; ++ u32 incr_val; ++ u32 incr_period; ++ u32 val; ++ u32 period = 0, def_period = 0; + -+ clk_disable_unprepare(bsp_priv->gate_clk_tx); -+ clk_disable_unprepare(bsp_priv->gate_clk_ref); ++ /* set system time counter resolution as ns ++ * if ptp clock is 50Mhz, 20ns per clock cycle, ++ * so increment value should be 20, ++ * increment period should be 1m ++ */ ++ if (ptp_clock == adj_clock) { ++ incr_val = INCVALUE_100MHZ << INCVALUE_SHIFT_100MHZ; ++ incr_period = INCPERIOD_100MHZ; ++ } else { ++ def_period = div_u64(1000000000ULL, ptp_clock); ++ period = div_u64(1000000000ULL, adj_clock); ++ if (def_period == period) ++ return 0; ++ ++ incr_period = 1; ++ incr_val = (def_period * def_period) / period; ++ } ++ ++ val = (incr_val | (incr_period << INCR_PERIOD_OFST)); ++ writel(val, ioaddr + PTP_INRC_ATTR); ++ ++ return 0; +} + -+static int bm_validate_ucast_entries(struct device *dev, int ucast_entries) ++static u64 emac_hw_get_systime(struct emac_priv *priv) +{ -+ int x = ucast_entries; ++ void __iomem *ioaddr = priv->iobase; ++ u64 systimel, systimeh; ++ u64 systim; + -+ switch (x) { -+ case 1 ... 
32: -+ case 64: -+ case 128: -+ break; -+ default: -+ x = 1; -+ dev_info(dev, "Unicast table entries set to unexpected value %d\n", -+ ucast_entries); -+ break; -+ } -+ return x; ++ /* update system time adjust low register */ ++ systimel = readl(ioaddr + SYS_TIME_GET_LOW); ++ systimeh = readl(ioaddr + SYS_TIME_GET_HI); ++ /* perform system time adjust */ ++ systim = (systimeh << 32) | systimel; ++ ++ return systim; +} + -+static int bm_validate_mcast_bins(struct device *dev, int mcast_bins) ++static u64 emac_hw_get_phc_time(struct emac_priv *priv) +{ -+ int x = mcast_bins; ++ unsigned long flags; ++ u64 cycles, ns; + -+ switch (x) { -+ case HASH_TABLE_SIZE: -+ case 128: -+ case 256: -+ break; -+ default: -+ x = 0; -+ dev_info(dev, "Hash table entries set to unexpected value %d\n", -+ mcast_bins); -+ break; -+ } -+ return x; ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ /* first read system time low register */ ++ cycles = emac_hw_get_systime(priv); ++ ns = timecounter_cyc2time(&priv->tc, cycles); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); ++ ++ return ns; +} + -+static void bm_dwmac_probe_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat) ++static u64 emac_hw_get_tx_timestamp(struct emac_priv *priv) +{ -+ struct device_node *np = pdev->dev.of_node; ++ void __iomem *ioaddr = priv->iobase; ++ unsigned long flags; ++ u64 systimel, systimeh; ++ u64 systim; ++ u64 ns; + -+ of_property_read_u32(np, "snps,multicast-filter-bins", &plat->multicast_filter_bins); -+ of_property_read_u32(np, "snps,perfect-filter-entries", &plat->unicast_filter_entries); -+ plat->unicast_filter_entries = bm_validate_ucast_entries(&pdev->dev, -+ plat->unicast_filter_entries); -+ plat->multicast_filter_bins = bm_validate_mcast_bins(&pdev->dev, -+ plat->multicast_filter_bins); -+ plat->flags |= (STMMAC_FLAG_TSO_EN); -+ plat->has_gmac4 = 1; -+ plat->has_gmac = 0; -+ plat->pmt = 0; ++ /* first read system time low register */ ++ systimel = readl(ioaddr + TX_TIMESTAMP_LOW); ++ systimeh = readl(ioaddr + TX_TIMESTAMP_HI); ++ systim = (systimeh << 32) | systimel; ++ ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ ns = timecounter_cyc2time(&priv->tc, systim); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); ++ ++ return ns; +} + -+static int bm_dwmac_probe(struct platform_device *pdev) ++static u64 emac_hw_get_rx_timestamp(struct emac_priv *priv) +{ -+ struct plat_stmmacenet_data *plat_dat; -+ struct stmmac_resources stmmac_res; -+ struct bm_mac *bsp_priv = NULL; -+ struct phy_device *phydev = NULL; -+ struct stmmac_priv *priv = NULL; -+ struct net_device *ndev = NULL; -+ int ret; -+ -+ pdev->dev.dma_mask = &bm_dma_mask; -+ pdev->dev.coherent_dma_mask = bm_dma_mask; ++ void __iomem *ioaddr = priv->iobase; ++ unsigned long flags; ++ u64 systimel, systimeh; ++ u64 systim; ++ u64 ns; + -+ bm_eth_reset_phy(pdev); ++ /* first read system time low register */ ++ systimel = readl(ioaddr + RX_TIMESTAMP_LOW); ++ systimeh = readl(ioaddr + RX_TIMESTAMP_HI); ++ systim = (systimeh << 32) | systimel; + -+ ret = stmmac_get_platform_resources(pdev, &stmmac_res); -+ if (ret) -+ return ret; ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ ns = timecounter_cyc2time(&priv->tc, systim); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); + -+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); -+ if (IS_ERR(plat_dat)) -+ return PTR_ERR(plat_dat); ++ return ns; ++} + -+ bm_dwmac_probe_config_dt(pdev, plat_dat); -+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); -+ if (ret) -+ goto 
err_remove_config_dt; ++static u64 emac_cyclecounter_read(const struct cyclecounter *cc) ++{ ++ struct emac_priv *priv = container_of(cc, struct emac_priv, cc); + -+ bsp_priv = devm_kzalloc(&pdev->dev, sizeof(*bsp_priv), GFP_KERNEL); -+ if (!bsp_priv) -+ return PTR_ERR(bsp_priv); ++ return emac_hw_get_systime(priv); ++} + -+ bsp_priv->dev = &pdev->dev; ++static int emac_hw_init_systime(struct emac_priv *priv, u64 set_ns) ++{ ++ unsigned long flags; + -+ /* clock setup */ -+ bsp_priv->clk_tx = devm_clk_get(&pdev->dev, -+ "clk_tx"); -+ if (IS_ERR(bsp_priv->clk_tx)) -+ dev_warn(&pdev->dev, "Cannot get mac tx clock!\n"); -+ else -+ plat_dat->fix_mac_speed = bm_mac_fix_speed; ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ timecounter_init(&priv->tc, &priv->cc, set_ns); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); + -+ bsp_priv->gate_clk_tx = devm_clk_get(&pdev->dev, "gate_clk_tx"); -+ if (IS_ERR(bsp_priv->gate_clk_tx)) -+ dev_warn(&pdev->dev, "Cannot get mac tx gating clock!\n"); -+ else -+ clk_prepare_enable(bsp_priv->gate_clk_tx); ++ return 0; ++} + -+ bsp_priv->gate_clk_ref = devm_clk_get(&pdev->dev, "gate_clk_ref"); -+ if (IS_ERR(bsp_priv->gate_clk_ref)) -+ dev_warn(&pdev->dev, "Cannot get mac ref gating clock!\n"); -+ else -+ clk_prepare_enable(bsp_priv->gate_clk_ref); ++static struct emac_hw_ptp emac_hwptp = { ++ .config_hw_tstamping = emac_hw_timestamp_config, ++ .config_systime_increment = emac_hw_config_systime_increment, ++ .init_systime = emac_hw_init_systime, ++ .get_phc_time = emac_hw_get_phc_time, ++ .get_tx_timestamp = emac_hw_get_tx_timestamp, ++ .get_rx_timestamp = emac_hw_get_rx_timestamp, ++}; + -+ plat_dat->bsp_priv = bsp_priv; -+ plat_dat->exit = bm_dwmac_exit; ++static int emac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) ++{ ++ struct emac_priv *priv = container_of(ptp, struct emac_priv, ptp_clock_ops); ++ void __iomem *ioaddr = priv->iobase; ++ unsigned long flags; ++ u32 addend, incvalue; ++ int neg_adj = 0; ++ u64 adj; + -+ ndev = dev_get_drvdata(&pdev->dev); -+ priv = netdev_priv(ndev); -+ phydev = mdiobus_get_phy(priv->mii, 0); -+ if (phydev == NULL) { -+ dev_err(&pdev->dev, "Can not get phy in addr 0\n"); -+ goto err_remove_config_dt; ++ if (ppb > ptp->max_adj || (ppb <= -1000000000)) ++ return -EINVAL; ++ if (ppb < 0) { ++ neg_adj = 1; ++ ppb = -ppb; + } + -+ /* set green LED0 active for transmit, yellow LED1 for link*/ -+ ret = phy_write_paged(phydev, 0, 0x1f, 0xd04); -+ if (ret < 0) -+ dev_err(&pdev->dev, "Can not select page 0xd04\n"); -+ ret = phy_write_paged(phydev, 0xd04, 0x10, 0x617f); -+ if (ret < 0) -+ dev_err(&pdev->dev, "Can not alter LED Configuration\n"); -+ /* disable eee LED function */ -+ ret = phy_write_paged(phydev, 0xd04, 0x11, 0x0); -+ if (ret < 0) -+ dev_err(&pdev->dev, "Can not disable EEE Configuration\n"); -+ ret = phy_write_paged(phydev, 0, 0x1f, 0); -+ if (ret < 0) -+ dev_err(&pdev->dev, "Can not select page 0\n"); ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ ++ /* ppb = (Fnew - F0)/F0 ++ * diff = F0 * ppb ++ */ ++ incvalue = INCVALUE_100MHZ << INCVALUE_SHIFT_100MHZ; ++ adj = incvalue; ++ adj *= ppb; ++ adj = div_u64(adj, 1000000000); ++ addend = neg_adj ? 
(incvalue - adj) : (incvalue + adj); ++ addend = (addend | (INCPERIOD_100MHZ << INCR_PERIOD_OFST)); ++ writel(addend, ioaddr + PTP_INRC_ATTR); ++ ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; ++} + -+err_remove_config_dt: -+ stmmac_remove_config_dt(pdev, plat_dat); ++static int emac_adjust_fine(struct ptp_clock_info *ptp, long scaled_ppm) ++{ ++ s32 ppb; + -+ return ret; ++ ppb = scaled_ppm_to_ppb(scaled_ppm); ++ return emac_adjust_freq(ptp, ppb); +} + -+static const struct of_device_id bm_dwmac_match[] = { -+ { .compatible = "bitmain,ethernet" }, -+ { } -+}; -+MODULE_DEVICE_TABLE(of, bm_dwmac_match); ++static int emac_adjust_time(struct ptp_clock_info *ptp, s64 delta) ++{ ++ struct emac_priv *priv = container_of(ptp, struct emac_priv, ptp_clock_ops); ++ unsigned long flags; + -+static struct platform_driver bm_dwmac_driver = { -+ .probe = bm_dwmac_probe, -+ .remove_new = stmmac_pltfr_remove, -+ .driver = { -+ .name = "bm-dwmac", -+ .pm = &stmmac_pltfr_pm_ops, -+ .of_match_table = bm_dwmac_match, -+ }, -+}; -+module_platform_driver(bm_dwmac_driver); ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ timecounter_adjtime(&priv->tc, delta); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); + -+MODULE_AUTHOR("Wei Huang"); -+MODULE_DESCRIPTION("Bitmain DWMAC specific glue layer"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c -new file mode 100644 -index 000000000000..ac5b3b968bf6 ---- /dev/null -+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c -@@ -0,0 +1,584 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * XuanTie DWMAC platform driver -+ * -+ * Copyright (C) 2021 Alibaba Group Holding Limited. -+ * Copyright (C) 2023 Jisheng Zhang -+ * -+ */ ++ return 0; ++} + -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include ++static int emac_phc_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) ++{ ++ struct emac_priv *priv = container_of(ptp, struct emac_priv, ptp_clock_ops); ++ unsigned long flags; ++ u64 cycles, ns; + -+#include "stmmac_platform.h" ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ cycles = emac_hw_get_systime(priv); ++ ns = timecounter_cyc2time(&priv->tc, cycles); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); ++ *ts = ns_to_timespec64(ns); + -+#define GMAC_CLK_EN 0x00 -+#define GMAC_TX_CLK_EN BIT(1) -+#define GMAC_TX_CLK_N_EN BIT(2) -+#define GMAC_TX_CLK_OUT_EN BIT(3) -+#define GMAC_RX_CLK_EN BIT(4) -+#define GMAC_RX_CLK_N_EN BIT(5) -+#define GMAC_EPHY_REF_CLK_EN BIT(6) -+#define GMAC_RXCLK_DELAY_CTRL 0x04 -+#define GMAC_RXCLK_BYPASS BIT(15) -+#define GMAC_RXCLK_INVERT BIT(14) -+#define GMAC_RXCLK_DELAY_MASK GENMASK(4, 0) -+#define GMAC_RXCLK_DELAY_VAL(x) FIELD_PREP(GMAC_RXCLK_DELAY_MASK, (x)) -+#define GMAC_TXCLK_DELAY_CTRL 0x08 -+#define GMAC_TXCLK_BYPASS BIT(15) -+#define GMAC_TXCLK_INVERT BIT(14) -+#define GMAC_TXCLK_DELAY_MASK GENMASK(4, 0) -+#define GMAC_TXCLK_DELAY_VAL(x) FIELD_PREP(GMAC_RXCLK_DELAY_MASK, (x)) -+#define GMAC_PLLCLK_DIV 0x0c -+#define GMAC_PLLCLK_DIV_EN BIT(31) -+#define GMAC_PLLCLK_DIV_MASK GENMASK(7, 0) -+#define GMAC_PLLCLK_DIV_NUM(x) FIELD_PREP(GMAC_PLLCLK_DIV_MASK, (x)) -+#define GMAC_CLK_PTP 0x14 -+#define GMAC_CLK_PTP_DIV_EN BIT(31) -+#define GMAC_CLK_PTP_DIV_MASK GENMASK(7, 0) -+#define GMAC_CLK_PTP_DIV_NUM(x) FIELD_PREP(GMAC_CLK_PTP_DIV_MASK, (x)) -+#define GMAC_GTXCLK_SEL 0x18 -+#define GMAC_GTXCLK_SEL_PLL BIT(0) -+#define GMAC_INTF_CTRL 0x1c -+#define PHY_INTF_MASK 
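emac_adjust_freq() above applies the commented formula ppb = (Fnew - F0)/F0 by scaling the nominal increment: adj = incvalue * |ppb| / 1e9, then addend = incvalue +/- adj. A standalone sketch of that scaling, using plain 64-bit arithmetic in place of div_u64(); the incvalue argument is an arbitrary caller-supplied example, not the driver's INCVALUE_100MHZ constant.

#include <stdint.h>

/* Scale a nominal per-tick increment by a signed parts-per-billion offset,
 * mirroring the arithmetic in emac_adjust_freq(). Assumes |ppb| is bounded
 * (the driver rejects values beyond max_adj before reaching this point). */
uint32_t scale_increment_ppb(uint32_t incvalue, int32_t ppb)
{
	int64_t mag = ppb < 0 ? -(int64_t)ppb : (int64_t)ppb;
	uint64_t diff = (uint64_t)incvalue * (uint64_t)mag / 1000000000ull;

	return ppb < 0 ? incvalue - (uint32_t)diff : incvalue + (uint32_t)diff;
}

Keeping the increment in a left-shifted fixed-point form (the INCVALUE_SHIFT_100MHZ shift above) appears to be what gives this integer scaling enough resolution for small ppb offsets.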
BIT(0) -+#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1) -+#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0) -+#define GMAC_TXCLK_OEN 0x20 -+#define TXCLK_DIR_MASK BIT(0) -+#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0) -+#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1) ++ return 0; ++} + -+#define GMAC_GMII_RGMII_RATE 125000000 -+#define GMAC_MII_RATE 25000000 -+#define GMAC_PTP_CLK_RATE 50000000 //50MHz ++static int emac_phc_set_time(struct ptp_clock_info *ptp, const struct timespec64 *ts) ++{ ++ struct emac_priv *priv = container_of(ptp, struct emac_priv, ptp_clock_ops); ++ unsigned long flags; ++ u64 ns; + -+struct th1520_dwmac { -+ struct plat_stmmacenet_data *plat; -+ struct regmap *apb_regmap; -+ struct device *dev; -+ u32 rx_delay; -+ u32 tx_delay; -+ struct clk *gmac_axi_aclk; -+ struct clk *gmac_axi_pclk; -+}; ++ ns = timespec64_to_ns(ts); ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ timecounter_init(&priv->tc, &priv->cc, ns); ++ spin_unlock_irqrestore(&priv->ptp_lock, flags); + -+#define pm_debug dev_dbg /* for suspend/resume interface debug info */ ++ return 0; ++} + -+static int th1520_dwmac_set_phy_if(struct plat_stmmacenet_data *plat) ++static void emac_systim_overflow_work(struct work_struct *work) +{ -+ struct th1520_dwmac *dwmac = plat->bsp_priv; -+ u32 phyif; ++ struct emac_priv *priv = container_of(work, struct emac_priv, systim_overflow_work.work); ++ struct timespec64 ts; ++ u64 ns; + -+ switch (plat->mac_interface) { -+ case PHY_INTERFACE_MODE_MII: -+ phyif = PHY_INTF_MII_GMII; -+ break; -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ phyif = PHY_INTF_RGMII; -+ break; -+ default: -+ dev_err(dwmac->dev, "unsupported phy interface %d\n", -+ plat->mac_interface); -+ return -EINVAL; -+ }; ++ ns = timecounter_read(&priv->tc); ++ ts = ns_to_timespec64(ns); ++ pr_debug("SYSTIM overflow check at %lld.%09lu\n", ++ (long long)ts.tv_sec, ts.tv_nsec); ++ schedule_delayed_work(&priv->systim_overflow_work, EMAC_SYSTIM_OVERFLOW_PERIOD); ++} + -+ regmap_write(dwmac->apb_regmap, GMAC_INTF_CTRL, phyif); ++static struct ptp_clock_info emac_ptp_clock_ops = { ++ .owner = THIS_MODULE, ++ .name = "emac_ptp_clock", ++ .max_adj = 1000000000, ++ .n_alarm = 0, ++ .n_ext_ts = 0, ++ .n_per_out = 0, ++ .n_pins = 0, ++ .pps = 0, ++ .adjfine = emac_adjust_fine, ++ .adjtime = emac_adjust_time, ++ .gettime64 = emac_phc_get_time, ++ .settime64 = emac_phc_set_time, ++}; + -+ return 0; ++static void emac_ptp_register(struct emac_priv *priv) ++{ ++ unsigned long flags; ++ ++ priv->cc.read = emac_cyclecounter_read; ++ priv->cc.mask = CYCLECOUNTER_MASK(64); ++ priv->cc.mult = 1; ++ priv->cc.shift = INCVALUE_SHIFT_100MHZ; ++ spin_lock_init(&priv->ptp_lock); ++ priv->ptp_clock_ops = emac_ptp_clock_ops; ++ ++ INIT_DELAYED_WORK(&priv->systim_overflow_work, emac_systim_overflow_work); ++ schedule_delayed_work(&priv->systim_overflow_work, EMAC_SYSTIM_OVERFLOW_PERIOD); ++ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, NULL); ++ if (IS_ERR(priv->ptp_clock)) { ++ netdev_err(priv->ndev, "ptp_clock_register failed\n"); ++ priv->ptp_clock = NULL; ++ } else if (priv->ptp_clock) { ++ netdev_info(priv->ndev, "registered PTP clock\n"); ++ } else { ++ netdev_info(priv->ndev, "PTP_1588_CLOCK maybe not enabled\n"); ++ } ++ ++ spin_lock_irqsave(&priv->ptp_lock, flags); ++ timecounter_init(&priv->tc, &priv->cc, ktime_to_ns(ktime_get_real())); ++ spin_unlock_irqrestore(&priv->ptp_lock, 
flags); ++ priv->hwptp = &emac_hwptp; +} + -+static int th1520_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat) ++static void emac_ptp_unregister(struct emac_priv *priv) +{ -+ struct th1520_dwmac *dwmac = plat->bsp_priv; -+ u32 txclk_dir; ++ cancel_delayed_work_sync(&priv->systim_overflow_work); ++ if (priv->ptp_clock) { ++ ptp_clock_unregister(priv->ptp_clock); ++ priv->ptp_clock = NULL; ++ pr_debug("Removed PTP HW clock successfully on %s\n", priv->ndev->name); ++ } ++ priv->hwptp = NULL; ++} + -+ switch (plat->mac_interface) { -+ case PHY_INTERFACE_MODE_MII: -+ txclk_dir = TXCLK_DIR_INPUT; -+ break; -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ txclk_dir = TXCLK_DIR_OUTPUT; -+ break; -+ default: -+ dev_err(dwmac->dev, "unsupported phy interface %d\n", -+ plat->mac_interface); -+ return -EINVAL; -+ }; ++static bool emac_is_rmii(struct emac_priv *priv) ++{ ++ return priv->phy_interface == PHY_INTERFACE_MODE_RMII; ++} + -+ regmap_write(dwmac->apb_regmap, GMAC_TXCLK_OEN, txclk_dir); ++static void emac_enable_axi_single_id_mode(struct emac_priv *priv, int en) ++{ ++ u32 val; + -+ return 0; ++ val = readl(priv->ctrl_reg); ++ if (en) ++ val |= AXI_SINGLE_ID; ++ else ++ val &= ~AXI_SINGLE_ID; ++ writel(val, priv->ctrl_reg); +} + -+static void th1520_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) ++static void emac_phy_interface_config(struct emac_priv *priv) +{ -+ struct th1520_dwmac *dwmac = priv; -+ struct plat_stmmacenet_data *plat = dwmac->plat; -+ unsigned long rate; -+ u32 div; ++ u32 val; + -+ switch (plat->mac_interface) { -+ /* For MII, rxc/txc is provided by phy */ -+ case PHY_INTERFACE_MODE_MII: -+ return; ++ val = readl(priv->ctrl_reg); ++ if (emac_is_rmii(priv)) { ++ val &= ~PHY_INTF_RGMII; ++ if (priv->ref_clk_frm_soc) ++ val |= REF_CLK_SEL; ++ else ++ val &= ~REF_CLK_SEL; ++ } else { ++ val |= PHY_INTF_RGMII; ++ if (priv->ref_clk_frm_soc) ++ val |= RGMII_TX_CLK_SEL; ++ } ++ writel(val, priv->ctrl_reg); ++} ++ ++static int emac_reset_hw(struct emac_priv *priv) ++{ ++ /* disable all the interrupts */ ++ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000); ++ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000); ++ ++ /* disable transmit and receive units */ ++ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000); ++ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000); ++ ++ /* stop the DMA */ ++ emac_wr(priv, DMA_CONTROL, 0x0000); ++ ++ /* reset mac, statistic counters */ ++ emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0018); ++ emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0000); ++ ++ return 0; ++} ++ ++static int emac_init_hw(struct emac_priv *priv) ++{ ++ u32 val = 0; ++ ++ emac_enable_axi_single_id_mode(priv, 1); ++ ++ /* disable transmit and receive units */ ++ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000); ++ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000); ++ ++ /* enable mac address 1 filtering */ ++ emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE); ++ ++ /* zero initialize the multicast hash table */ ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0); ++ emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, 0x1f8); ++ emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, priv->tx_threshold); ++ emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, priv->rx_threshold); ++ ++ /* set emac rx mitigation frame count */ ++ val = priv->rx_coal_frames & 
MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MSK; ++ ++ /* set emac rx mitigation timeout */ ++ val |= ((priv->rx_coal_timeout * AXI_CLK_CYCLES_PER_US) << ++ MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_OFST) & ++ MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MSK; ++ ++ /* enable emac rx irq mitigation */ ++ val |= MRGEBIT_RECEIVE_IRQ_MITIGATION_ENABLE; ++ ++ emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, val); ++ emac_wr(priv, MAC_FC_CONTROL, MREGBIT_FC_DECODE_ENABLE); ++ ++ /* reset dma */ ++ emac_wr(priv, DMA_CONTROL, 0x0000); ++ emac_wr(priv, DMA_CONFIGURATION, 0x01); ++ usleep_range(9000, 10000); ++ emac_wr(priv, DMA_CONFIGURATION, 0x00); ++ usleep_range(9000, 10000); ++ ++ val = MREGBIT_STRICT_BURST | MREGBIT_DMA_64BIT_MODE; ++ if (priv->dma_burst_len) ++ val |= 1 << priv->dma_burst_len; ++ else ++ val |= MREGBIT_BURST_1WORD; ++ emac_wr(priv, DMA_CONFIGURATION, val); ++ ++ /* if emac has ptp 1588 support, so enable PTP 1588 irq */ ++ if (priv->ptp_support) ++ emac_wr(priv, PTP_1588_IRQ_EN, PTP_TX_TIMESTAMP | PTP_RX_TIMESTAMP); ++ ++ return 0; ++} ++ ++static int emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr) ++{ ++ emac_wr(priv, MAC_ADDRESS1_HIGH, ((addr[1] << 8) | addr[0])); ++ emac_wr(priv, MAC_ADDRESS1_MED, ((addr[3] << 8) | addr[2])); ++ emac_wr(priv, MAC_ADDRESS1_LOW, ((addr[5] << 8) | addr[4])); ++ ++ return 0; ++} ++ ++static void emac_dma_start_transmit(struct emac_priv *priv) ++{ ++ emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 0xFF); ++} ++ ++static void emac_enable_interrupt(struct emac_priv *priv) ++{ ++ u32 val; ++ ++ val = emac_rd(priv, DMA_INTERRUPT_ENABLE); ++ val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE; ++ val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE; ++ emac_wr(priv, DMA_INTERRUPT_ENABLE, val); ++} ++ ++static void emac_disable_interrupt(struct emac_priv *priv) ++{ ++ u32 val; ++ ++ val = emac_rd(priv, DMA_INTERRUPT_ENABLE); ++ val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE; ++ val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE; ++ emac_wr(priv, DMA_INTERRUPT_ENABLE, val); ++} ++ ++static inline u32 emac_tx_avail(struct emac_priv *priv) ++{ ++ struct emac_desc_ring *tx_ring = &priv->tx_ring; ++ u32 avail; ++ ++ if (tx_ring->tail > tx_ring->head) ++ avail = tx_ring->tail - tx_ring->head - 1; ++ else ++ avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1; ++ ++ return avail; ++} ++ ++static void emac_tx_coal_timer_resched(struct emac_priv *priv) ++{ ++ mod_timer(&priv->txtimer, jiffies + usecs_to_jiffies(priv->tx_coal_timeout)); ++} ++ ++static void emac_tx_coal_timer(struct timer_list *t) ++{ ++ struct emac_priv *priv = from_timer(priv, t, txtimer); ++ ++ if (likely(napi_schedule_prep(&priv->napi))) ++ __napi_schedule(&priv->napi); ++} ++ ++static int emac_tx_coal(struct emac_priv *priv, u32 pkt_num) ++{ ++ /* Manage tx mitigation */ ++ priv->tx_count_frames += pkt_num; ++ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { ++ emac_tx_coal_timer_resched(priv); ++ return false; ++ } ++ ++ priv->tx_count_frames = 0; ++ return true; ++} ++ ++static void emac_get_tx_hwtstamp(struct emac_priv *priv, struct sk_buff *skb) ++{ ++ struct skb_shared_hwtstamps shhwtstamp; ++ u64 ns; ++ ++ if (!priv->hwts_tx_en) ++ return; ++ ++ /* exit if skb doesn't support hw tstamp */ ++ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) ++ return; ++ ++ /* get the valid tstamp */ ++ ns = priv->hwptp->get_tx_timestamp(priv); ++ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); ++ shhwtstamp.hwtstamp = ns_to_ktime(ns); ++ /* pass tstamp to stack 
*/ ++ skb_tstamp_tx(skb, &shhwtstamp); ++} ++ ++static void emac_get_rx_hwtstamp(struct emac_priv *priv, struct emac_rx_desc *p, ++ struct sk_buff *skb) ++{ ++ struct skb_shared_hwtstamps *shhwtstamp = NULL; ++ u64 ns; ++ ++ if (!priv->hwts_rx_en) ++ return; ++ ++ /* Check if timestamp is available */ ++ if (p->ptp_pkt && p->rx_timestamp) { ++ ns = priv->hwptp->get_rx_timestamp(priv); ++ netdev_dbg(priv->ndev, "get valid RX hw timestamp %llu\n", ns); ++ shhwtstamp = skb_hwtstamps(skb); ++ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); ++ shhwtstamp->hwtstamp = ns_to_ktime(ns); ++ } else { ++ netdev_dbg(priv->ndev, "cannot get RX hw timestamp\n"); ++ } ++} ++ ++static int emac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) ++{ ++ struct emac_priv *priv = netdev_priv(dev); ++ struct hwtstamp_config config; ++ struct timespec64 now; ++ u64 ns_ptp; ++ u32 ptp_event_msg_id = 0; ++ u32 rx_ptp_type = 0; ++ ++ if (!priv->ptp_support) { ++ netdev_alert(priv->ndev, "No support for HW time stamping\n"); ++ priv->hwts_tx_en = 0; ++ priv->hwts_rx_en = 0; ++ ++ return -EOPNOTSUPP; ++ } ++ ++ if (copy_from_user(&config, ifr->ifr_data, sizeof(struct hwtstamp_config))) ++ return -EFAULT; ++ ++ netdev_dbg(priv->ndev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", ++ __func__, config.flags, config.tx_type, config.rx_filter); ++ ++ /* reserved for future extensions */ ++ if (config.flags) ++ return -EINVAL; ++ ++ if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) ++ return -ERANGE; ++ ++ switch (config.rx_filter) { ++ case HWTSTAMP_FILTER_NONE: ++ /* time stamp no incoming packet at all */ ++ config.rx_filter = HWTSTAMP_FILTER_NONE; ++ break; ++ ++ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: ++ /* PTP v1, UDP, Sync packet */ ++ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; ++ /* take time stamp for SYNC messages only */ ++ ptp_event_msg_id = MSG_SYNC; ++ rx_ptp_type = PTP_V1_L4_ONLY; ++ break; ++ ++ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: ++ /* PTP v1, UDP, Delay_req packet */ ++ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; ++ /* take time stamp for Delay_Req messages only */ ++ ptp_event_msg_id = MSG_DELAY_REQ; ++ rx_ptp_type = PTP_V1_L4_ONLY; ++ break; ++ ++ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: ++ /* PTP v2, UDP, Sync packet */ ++ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; ++ /* take time stamp for SYNC messages only */ ++ ptp_event_msg_id = MSG_SYNC; ++ rx_ptp_type = PTP_V2_L2_L4; ++ break; ++ ++ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: ++ /* PTP v2, UDP, Delay_req packet */ ++ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; ++ /* take time stamp for Delay_Req messages only */ ++ ptp_event_msg_id = MSG_DELAY_REQ; ++ rx_ptp_type = PTP_V2_L2_L4; ++ break; ++ ++ case HWTSTAMP_FILTER_PTP_V2_EVENT: ++ /* PTP v2/802.AS1 any layer, any kind of event packet */ ++ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ++ ptp_event_msg_id = ALL_EVENTS; ++ rx_ptp_type = PTP_V2_L2_L4; ++ break; ++ ++ case HWTSTAMP_FILTER_PTP_V2_SYNC: ++ /* PTP v2/802.AS1, any layer, Sync packet */ ++ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; ++ /* take time stamp for SYNC messages only */ ++ ptp_event_msg_id = MSG_SYNC; ++ rx_ptp_type = PTP_V2_L2_L4; ++ break; ++ ++ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: ++ /* PTP v2/802.AS1, any layer, Delay_req packet */ ++ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; ++ /* take time stamp for Delay_Req messages only */ ++ ptp_event_msg_id = MSG_DELAY_REQ; ++ rx_ptp_type = PTP_V2_L2_L4; ++ break; ++ ++ 
default: ++ return -ERANGE; ++ } ++ ++ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); ++ priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; ++ ++ if (!priv->hwts_tx_en && !priv->hwts_rx_en) { ++ priv->hwptp->config_hw_tstamping(priv, 0, 0, 0); ++ } else { ++ priv->hwptp->config_hw_tstamping(priv, 1, rx_ptp_type, ptp_event_msg_id); ++ ++ /* initialize system time */ ++ ktime_get_real_ts64(&now); ++ priv->hwptp->init_systime(priv, timespec64_to_ns(&now)); ++ ++ /* program Increment reg */ ++ priv->hwptp->config_systime_increment(priv, priv->ptp_clk_rate, priv->ptp_clk_rate); ++ ++ ns_ptp = priv->hwptp->get_phc_time(priv); ++ ktime_get_real_ts64(&now); ++ /* check the diff between ptp timer and system time */ ++ if (abs(timespec64_to_ns(&now) - ns_ptp) > 5000) ++ priv->hwptp->init_systime(priv, timespec64_to_ns(&now)); ++ } ++ return copy_to_user(ifr->ifr_data, &config, sizeof(struct hwtstamp_config)) ? -EFAULT : 0; ++} ++ ++static irqreturn_t emac_interrupt_handler(int irq, void *dev_id) ++{ ++ struct net_device *ndev = (struct net_device *)dev_id; ++ struct emac_priv *priv = netdev_priv(ndev); ++ u32 status; ++ u32 clr = 0; ++ ++ /* Check if emac is up */ ++ if (test_bit(EMAC_DOWN, &priv->state)) ++ return IRQ_HANDLED; ++ ++ /* read the status register for IRQ received */ ++ status = emac_rd(priv, DMA_STATUS_IRQ); ++ ++ if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) { ++ emac_disable_interrupt(priv); ++ clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ; ++ napi_schedule(&priv->napi); ++ } ++ ++ if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ) ++ clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ; ++ ++ if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ) ++ clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ; ++ ++ if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) { ++ emac_disable_interrupt(priv); ++ clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ; ++ napi_schedule(&priv->napi); ++ } ++ ++ if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ) ++ clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ; ++ ++ if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ) ++ clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ; ++ ++ if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ) ++ clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ; ++ ++ emac_wr(priv, DMA_STATUS_IRQ, clr); ++ ++ if (priv->ptp_support) { ++ status = emac_rd(priv, PTP_1588_IRQ_STS); ++ if ((status & PTP_TX_TIMESTAMP) || (status & PTP_RX_TIMESTAMP)) ++ napi_schedule(&priv->napi); ++ ++ emac_wr(priv, PTP_1588_IRQ_STS, status); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static void emac_configure_tx(struct emac_priv *priv) ++{ ++ u32 val; ++ ++ /* set the transmit base address */ ++ val = (u32)(priv->tx_ring.desc_dma_addr); ++ emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val); ++ ++ /* Tx Inter Packet Gap value and enable the transmit */ ++ val = emac_rd(priv, MAC_TRANSMIT_CONTROL); ++ val &= (~MREGBIT_IFG_LEN); ++ val |= MREGBIT_TRANSMIT_ENABLE; ++ val |= MREGBIT_TRANSMIT_AUTO_RETRY; ++ emac_wr(priv, MAC_TRANSMIT_CONTROL, val); ++ emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x00); ++ ++ /* start tx dma */ ++ val = emac_rd(priv, DMA_CONTROL); ++ val |= MREGBIT_START_STOP_TRANSMIT_DMA; ++ emac_wr(priv, DMA_CONTROL, val); ++} ++ ++static void emac_configure_rx(struct emac_priv *priv) ++{ ++ u32 val; ++ ++ /* set the receive base address */ ++ val = (u32)(priv->rx_ring.desc_dma_addr); ++ emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val); ++ ++ /* enable the receive */ ++ val = emac_rd(priv, MAC_RECEIVE_CONTROL); ++ val |= MREGBIT_RECEIVE_ENABLE; ++ val |= MREGBIT_STORE_FORWARD; ++ emac_wr(priv, 
MAC_RECEIVE_CONTROL, val); ++ ++ /* start rx dma */ ++ val = emac_rd(priv, DMA_CONTROL); ++ val |= MREGBIT_START_STOP_RECEIVE_DMA; ++ emac_wr(priv, DMA_CONTROL, val); ++} ++ ++static int emac_free_tx_buf(struct emac_priv *priv, int i) ++{ ++ struct emac_desc_ring *tx_ring; ++ struct emac_tx_desc_buffer *tx_buf; ++ struct desc_buf *buf; ++ int j; ++ ++ tx_ring = &priv->tx_ring; ++ tx_buf = &tx_ring->tx_desc_buf[i]; ++ ++ for (j = 0; j < 2; j++) { ++ buf = &tx_buf->buf[j]; ++ if (buf->dma_addr) { ++ if (buf->map_as_page) ++ dma_unmap_page(&priv->pdev->dev, buf->dma_addr, ++ buf->dma_len, DMA_TO_DEVICE); ++ else ++ dma_unmap_single(&priv->pdev->dev, buf->dma_addr, ++ buf->dma_len, DMA_TO_DEVICE); ++ ++ buf->dma_addr = 0; ++ buf->map_as_page = false; ++ buf->buff_addr = NULL; ++ } ++ } ++ ++ if (tx_buf->skb) { ++ dev_kfree_skb_any(tx_buf->skb); ++ tx_buf->skb = NULL; ++ } ++ ++ return 0; ++} ++ ++static void emac_clean_tx_desc_ring(struct emac_priv *priv) ++{ ++ struct emac_desc_ring *tx_ring = &priv->tx_ring; ++ u32 i; ++ ++ /* Free all the Tx ring sk_buffs */ ++ for (i = 0; i < tx_ring->total_cnt; i++) ++ emac_free_tx_buf(priv, i); ++ ++ tx_ring->head = 0; ++ tx_ring->tail = 0; ++} ++ ++static void emac_clean_rx_desc_ring(struct emac_priv *priv) ++{ ++ struct emac_desc_ring *rx_ring; ++ struct emac_desc_buffer *rx_buf; ++ u32 i; ++ ++ rx_ring = &priv->rx_ring; ++ ++ /* Free all the Rx ring sk_buffs */ ++ for (i = 0; i < rx_ring->total_cnt; i++) { ++ rx_buf = &rx_ring->desc_buf[i]; ++ if (rx_buf->skb) { ++ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr, ++ rx_buf->dma_len, DMA_FROM_DEVICE); ++ ++ dev_kfree_skb(rx_buf->skb); ++ rx_buf->skb = NULL; ++ } ++ } ++ ++ rx_ring->tail = 0; ++ rx_ring->head = 0; ++} ++ ++static void emac_ptp_init(struct emac_priv *priv) ++{ ++ int ret; ++ ++ if (priv->ptp_support) { ++ ret = clk_prepare_enable(priv->ptp_clk); ++ if (ret < 0) { ++ pr_warn("ptp clock failed to enable\n"); ++ priv->ptp_clk = NULL; ++ } ++ emac_ptp_register(priv); ++ } ++} ++ ++static void emac_ptp_deinit(struct emac_priv *priv) ++{ ++ if (priv->ptp_support) { ++ if (priv->ptp_clk) ++ clk_disable_unprepare(priv->ptp_clk); ++ emac_ptp_unregister(priv); ++ } ++} ++ ++static int emac_down(struct emac_priv *priv) ++{ ++ struct net_device *ndev = priv->ndev; ++ ++ netif_stop_queue(ndev); ++ /* Stop and disconnect the PHY */ ++ if (ndev->phydev) { ++ phy_stop(ndev->phydev); ++ phy_disconnect(ndev->phydev); ++ } ++ ++ priv->link = false; ++ priv->duplex = DUPLEX_UNKNOWN; ++ priv->speed = SPEED_UNKNOWN; ++ ++ napi_disable(&priv->napi); ++ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000); ++ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000); ++ free_irq(priv->irq, ndev); ++ emac_ptp_deinit(priv); ++ emac_reset_hw(priv); ++ netif_carrier_off(ndev); ++ ++ return 0; ++} ++ ++static int emac_alloc_tx_resources(struct emac_priv *priv) ++{ ++ struct emac_desc_ring *tx_ring = &priv->tx_ring; ++ struct platform_device *pdev = priv->pdev; ++ u32 size; ++ ++ size = sizeof(struct emac_tx_desc_buffer) * tx_ring->total_cnt; ++ ++ /* allocate memory */ ++ tx_ring->tx_desc_buf = kzalloc(size, GFP_KERNEL); ++ if (!tx_ring->tx_desc_buf) ++ return -ENOMEM; ++ ++ memset(tx_ring->tx_desc_buf, 0, size); ++ tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_tx_desc); ++ EMAC_ROUNDUP(tx_ring->total_size, 1024); ++ tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size, ++ &tx_ring->desc_dma_addr, GFP_KERNEL); ++ if (!tx_ring->desc_addr) { ++ kfree(tx_ring->tx_desc_buf); ++ return -ENOMEM; ++ } ++ ++ 
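The head/tail indices initialized by these allocation helpers feed emac_tx_avail() above, which keeps one descriptor slot unused so that head == tail always means an empty ring. A standalone sketch of that free-slot arithmetic; the ring size and index values are illustrative only.

#include <assert.h>
#include <stdint.h>

/* Free descriptors in a ring of 'total' slots with producer index 'head' and
 * consumer index 'tail', one slot reserved (same convention as emac_tx_avail()). */
static uint32_t ring_free_slots(uint32_t total, uint32_t head, uint32_t tail)
{
	if (tail > head)
		return tail - head - 1;

	return total - head + tail - 1;
}

int main(void)
{
	assert(ring_free_slots(128, 0, 0) == 127);   /* empty ring */
	assert(ring_free_slots(128, 100, 10) == 37); /* producer ahead of consumer */
	return 0;
}

emac_start_xmit() relies on the same arithmetic to decide when to stop the queue and emac_tx_clean_desc() to decide when to wake it.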
memset(tx_ring->desc_addr, 0, tx_ring->total_size); ++ ++ tx_ring->head = 0; ++ tx_ring->tail = 0; ++ ++ return 0; ++} ++ ++static int emac_alloc_rx_resources(struct emac_priv *priv) ++{ ++ struct emac_desc_ring *rx_ring = &priv->rx_ring; ++ struct platform_device *pdev = priv->pdev; ++ u32 buf_len; ++ ++ buf_len = sizeof(struct emac_desc_buffer) * rx_ring->total_cnt; ++ ++ rx_ring->desc_buf = kzalloc(buf_len, GFP_KERNEL); ++ if (!rx_ring->desc_buf) ++ return -ENOMEM; ++ ++ memset(rx_ring->desc_buf, 0, buf_len); ++ /* round up to nearest 4K */ ++ rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_rx_desc); ++ EMAC_ROUNDUP(rx_ring->total_size, 1024); ++ rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size, ++ &rx_ring->desc_dma_addr, GFP_KERNEL); ++ if (!rx_ring->desc_addr) { ++ kfree(rx_ring->desc_buf); ++ return -ENOMEM; ++ } ++ ++ memset(rx_ring->desc_addr, 0, rx_ring->total_size); ++ ++ rx_ring->head = 0; ++ rx_ring->tail = 0; ++ ++ return 0; ++} ++ ++static void emac_free_tx_resources(struct emac_priv *priv) ++{ ++ emac_clean_tx_desc_ring(priv); ++ kfree(priv->tx_ring.tx_desc_buf); ++ priv->tx_ring.tx_desc_buf = NULL; ++ dma_free_coherent(&priv->pdev->dev, priv->tx_ring.total_size, ++ priv->tx_ring.desc_addr, priv->tx_ring.desc_dma_addr); ++ priv->tx_ring.desc_addr = NULL; ++} ++ ++static void emac_free_rx_resources(struct emac_priv *priv) ++{ ++ emac_clean_rx_desc_ring(priv); ++ kfree(priv->rx_ring.desc_buf); ++ priv->rx_ring.desc_buf = NULL; ++ dma_free_coherent(&priv->pdev->dev, priv->rx_ring.total_size, ++ priv->rx_ring.desc_addr, priv->rx_ring.desc_dma_addr); ++ priv->rx_ring.desc_addr = NULL; ++} ++ ++static int emac_tx_clean_desc(struct emac_priv *priv) ++{ ++ struct emac_desc_ring *tx_ring; ++ struct emac_tx_desc_buffer *tx_buf; ++ struct emac_tx_desc *tx_desc; ++ struct net_device *ndev = priv->ndev; ++ u32 i; ++ ++ netif_tx_lock(ndev); ++ ++ tx_ring = &priv->tx_ring; ++ ++ i = tx_ring->tail; ++ ++ while (i != tx_ring->head) { ++ tx_desc = &((struct emac_tx_desc *)tx_ring->desc_addr)[i]; ++ ++ /* if desc still own by dma, so we quit it */ ++ if (tx_desc->own) ++ break; ++ ++ tx_buf = &tx_ring->tx_desc_buf[i]; ++ ++ if (tx_buf->timestamped && tx_buf->skb) { ++ emac_get_tx_hwtstamp(priv, tx_buf->skb); ++ tx_buf->timestamped = 0; ++ } ++ ++ emac_free_tx_buf(priv, i); ++ memset(tx_desc, 0, sizeof(struct emac_tx_desc)); ++ ++ if (++i == tx_ring->total_cnt) ++ i = 0; ++ } ++ ++ tx_ring->tail = i; ++ ++ if (unlikely(netif_queue_stopped(ndev) && emac_tx_avail(priv) > tx_ring->total_cnt / 4)) ++ netif_wake_queue(ndev); ++ ++ netif_tx_unlock(ndev); ++ ++ return 0; ++} ++ ++static int emac_rx_frame_status(struct emac_priv *priv, struct emac_rx_desc *dsc) ++{ ++ /* if last descritpor isn't set, so we drop it*/ ++ if (!dsc->last_descriptor) { ++ netdev_dbg(priv->ndev, "rx LD bit isn't set, drop it.\n"); ++ return FRAME_DISCARD; ++ } ++ ++ /* A Frame that is less than 64-bytes (from DA thru the FCS field) ++ * is considered as Runt Frame. ++ * Most of the Runt Frames happen because of collisions. 
++ */ ++ if (dsc->apllication_status & EMAC_RX_FRAME_RUNT) { ++ netdev_dbg(priv->ndev, "rx frame less than 64.\n"); ++ return FRAME_DISCARD; ++ } ++ ++ /* When the frame fails the CRC check, ++ * the frame is assumed to have the CRC error ++ */ ++ if (dsc->apllication_status & EMAC_RX_FRAME_CRC_ERR) { ++ netdev_dbg(priv->ndev, "rx frame crc error\n"); ++ return FRAME_DISCARD; ++ } ++ ++ /* When the length of the frame exceeds ++ * the Programmed Max Frame Length ++ */ ++ if (dsc->apllication_status & EMAC_RX_FRAME_MAX_LEN_ERR) { ++ netdev_dbg(priv->ndev, "rx frame too long\n"); ++ return FRAME_DISCARD; ++ } ++ ++ /* frame reception is truncated at that point and ++ * frame is considered to have Jabber Error ++ */ ++ if (dsc->apllication_status & EMAC_RX_FRAME_JABBER_ERR) { ++ netdev_dbg(priv->ndev, "rx frame has been truncated\n"); ++ return FRAME_DISCARD; ++ } ++ ++ /* this bit is only for 802.3 Type Frames */ ++ if (dsc->apllication_status & EMAC_RX_FRAME_LENGTH_ERR) { ++ netdev_dbg(priv->ndev, "rx frame length err for 802.3\n"); ++ return FRAME_DISCARD; ++ } ++ ++ if (dsc->frm_packet_len <= ETHERNET_FCS_SIZE || ++ dsc->frm_packet_len > priv->dma_buf_sz) { ++ netdev_dbg(priv->ndev, "rx frame len too small or too long\n"); ++ return FRAME_DISCARD; ++ } ++ ++ return FRAME_OK; ++} ++ ++static void emac_alloc_rx_desc_buffers(struct emac_priv *priv) ++{ ++ struct net_device *ndev = priv->ndev; ++ struct emac_desc_ring *rx_ring = &priv->rx_ring; ++ struct emac_desc_buffer *rx_buf; ++ struct sk_buff *skb; ++ struct emac_rx_desc *rx_desc; ++ u32 i; ++ ++ i = rx_ring->head; ++ rx_buf = &rx_ring->desc_buf[i]; ++ ++ while (!rx_buf->skb) { ++ skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz); ++ if (!skb) ++ break; ++ ++ skb->dev = ndev; ++ ++ rx_buf->skb = skb; ++ rx_buf->dma_len = priv->dma_buf_sz; ++ rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data, ++ priv->dma_buf_sz, DMA_FROM_DEVICE); ++ if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) { ++ netdev_err(ndev, "dma mapping_error\n"); ++ goto dma_map_err; ++ } ++ ++ rx_desc = &((struct emac_rx_desc *)rx_ring->desc_addr)[i]; ++ ++ memset(rx_desc, 0, sizeof(struct emac_rx_desc)); ++ ++ rx_desc->buf_addr1 = rx_buf->dma_addr; ++ rx_desc->buf_size1 = rx_buf->dma_len; ++ ++ rx_desc->first_descriptor = 0; ++ rx_desc->last_descriptor = 0; ++ if (++i == rx_ring->total_cnt) { ++ rx_desc->end_ring = 1; ++ i = 0; ++ } ++ dma_wmb(); ++ rx_desc->own = 1; ++ ++ rx_buf = &rx_ring->desc_buf[i]; ++ } ++ rx_ring->head = i; ++ ++ return; ++ ++dma_map_err: ++ dev_kfree_skb_any(skb); ++ rx_buf->skb = NULL; ++} ++ ++static int emac_rx_clean_desc(struct emac_priv *priv, int budget) ++{ ++ struct emac_desc_ring *rx_ring; ++ struct emac_desc_buffer *rx_buf; ++ struct net_device *ndev = priv->ndev; ++ struct emac_rx_desc *rx_desc; ++ struct sk_buff *skb = NULL; ++ int status; ++ u32 receive_packet = 0; ++ u32 i; ++ u32 skb_len; ++ ++ rx_ring = &priv->rx_ring; ++ ++ i = rx_ring->tail; ++ ++ while (budget--) { ++ /* get rx desc */ ++ rx_desc = &((struct emac_rx_desc *)rx_ring->desc_addr)[i]; ++ ++ /* if rx_desc still owned by DMA, so we need to wait */ ++ if (rx_desc->own) ++ break; ++ ++ rx_buf = &rx_ring->desc_buf[i]; ++ ++ if (!rx_buf->skb) ++ break; ++ ++ receive_packet++; ++ ++ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr, ++ rx_buf->dma_len, DMA_FROM_DEVICE); ++ ++ status = emac_rx_frame_status(priv, rx_desc); ++ if (unlikely(status == FRAME_DISCARD)) { ++ ndev->stats.rx_dropped++; ++ dev_kfree_skb_irq(rx_buf->skb); ++ rx_buf->skb = 
NULL; ++ } else { ++ skb = rx_buf->skb; ++ skb_len = rx_desc->frm_packet_len - ETHERNET_FCS_SIZE; ++ skb_put(skb, skb_len); ++ skb->dev = ndev; ++ ndev->hard_header_len = ETH_HLEN; ++ ++ emac_get_rx_hwtstamp(priv, rx_desc, skb); ++ ++ skb->protocol = eth_type_trans(skb, ndev); ++ ++ skb->ip_summed = CHECKSUM_NONE; ++ ++ napi_gro_receive(&priv->napi, skb); ++ ++ ndev->stats.rx_packets++; ++ ndev->stats.rx_bytes += skb_len; ++ ++ memset(rx_desc, 0, sizeof(struct emac_rx_desc)); ++ rx_buf->skb = NULL; ++ } ++ ++ if (++i == rx_ring->total_cnt) ++ i = 0; ++ } ++ ++ rx_ring->tail = i; ++ ++ emac_alloc_rx_desc_buffers(priv); ++ ++ return receive_packet; ++} ++ ++static int emac_rx_poll(struct napi_struct *napi, int budget) ++{ ++ struct emac_priv *priv = container_of(napi, struct emac_priv, napi); ++ int work_done; ++ ++ emac_tx_clean_desc(priv); ++ ++ work_done = emac_rx_clean_desc(priv, budget); ++ if (work_done < budget) { ++ napi_complete(napi); ++ emac_enable_interrupt(priv); ++ } ++ ++ return work_done; ++} ++ ++static int emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb, ++ u32 max_tx_len, u32 frag_num) ++{ ++ struct emac_desc_ring *tx_ring; ++ struct emac_tx_desc_buffer *tx_buf; ++ struct emac_tx_desc *tx_desc; ++ u32 skb_linear_len = skb_headlen(skb); ++ u32 len, i, f, first, buf_idx = 0; ++ phys_addr_t addr; ++ u8 do_tx_timestamp = 0; ++ ++ tx_ring = &priv->tx_ring; ++ ++ i = tx_ring->head; ++ first = i; ++ ++ skb_tx_timestamp(skb); ++ ++ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { ++ /* declare that device is doing timestamping */ ++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++ do_tx_timestamp = 1; ++ } ++ ++ if (++i == tx_ring->total_cnt) ++ i = 0; ++ ++ /* if the data is fragmented */ ++ for (f = 0; f < frag_num; f++) { ++ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; ++ ++ len = skb_frag_size(frag); ++ ++ buf_idx = (f + 1) % 2; ++ ++ /* first frag fill into second buffer of first descriptor */ ++ if (f == 0) { ++ tx_buf = &tx_ring->tx_desc_buf[first]; ++ tx_desc = &((struct emac_tx_desc *)tx_ring->desc_addr)[first]; ++ } else { ++ /* from second frags to more frags, ++ * we only get new descriptor when it frag num is odd. 
++ */ ++ if (!buf_idx) { ++ tx_buf = &tx_ring->tx_desc_buf[i]; ++ tx_desc = &((struct emac_tx_desc *)tx_ring->desc_addr)[i]; ++ } ++ } ++ tx_buf->buf[buf_idx].dma_len = len; ++ ++ addr = skb_frag_dma_map(&priv->pdev->dev, frag, 0, ++ skb_frag_size(frag), DMA_TO_DEVICE); ++ ++ if (dma_mapping_error(&priv->pdev->dev, addr)) { ++ netdev_err(priv->ndev, "%s dma map page:%d error\n", __func__, f); ++ goto dma_map_err; ++ } ++ tx_buf->buf[buf_idx].dma_addr = addr; ++ tx_buf->buf[buf_idx].map_as_page = true; ++ ++ if (do_tx_timestamp) ++ tx_buf->timestamped = 1; ++ ++ /* every desc has two buffer for packet */ ++ ++ if (buf_idx) { ++ tx_desc->buf_addr2 = addr; ++ tx_desc->buf_size2 = len; ++ } else { ++ tx_desc->buf_addr1 = addr; ++ tx_desc->buf_size1 = len; ++ ++ if (++i == tx_ring->total_cnt) { ++ tx_desc->end_ring = 1; ++ i = 0; ++ } ++ } ++ ++ /* if frag num equal 1, we don't set tx_desc except buffer addr & size */ ++ if (f > 0) { ++ if (f == (frag_num - 1)) { ++ tx_desc->last_segment = 1; ++ tx_buf->skb = skb; ++ if (emac_tx_coal(priv, frag_num + 1)) ++ tx_desc->int_on_complet = 1; ++ } ++ ++ tx_desc->own = 1; ++ } ++ } ++ ++ /* fill out first descriptor for skb linear data */ ++ tx_buf = &tx_ring->tx_desc_buf[first]; ++ ++ tx_buf->buf[0].dma_len = skb_linear_len; ++ ++ addr = dma_map_single(&priv->pdev->dev, skb->data, skb_linear_len, DMA_TO_DEVICE); ++ if (dma_mapping_error(&priv->pdev->dev, addr)) { ++ netdev_err(priv->ndev, "%s dma mapping_error\n", __func__); ++ goto dma_map_err; ++ } ++ ++ tx_buf->buf[0].dma_addr = addr; ++ tx_buf->buf[0].buff_addr = skb->data; ++ tx_buf->buf[0].map_as_page = false; ++ ++ /* fill tx descriptor */ ++ tx_desc = &((struct emac_tx_desc *)tx_ring->desc_addr)[first]; ++ tx_desc->buf_addr1 = addr; ++ tx_desc->buf_size1 = skb_linear_len; ++ tx_desc->first_segment = 1; ++ ++ /* if last desc for ring, need to end ring flag */ ++ if (first == (tx_ring->total_cnt - 1)) ++ tx_desc->end_ring = 1; ++ ++ /* if frag num more than 1, that means data need another desc ++ * so current descriptor isn't last piece of packet data. ++ */ ++ tx_desc->last_segment = frag_num > 1 ? 
0 : 1; ++ if (frag_num <= 1 && emac_tx_coal(priv, 1)) ++ tx_desc->int_on_complet = 1; ++ ++ if (do_tx_timestamp) { ++ tx_desc->tx_timestamp = 1; ++ tx_buf->timestamped = 1; ++ } ++ ++ /* only last descriptor had skb pointer */ ++ if (tx_desc->last_segment) ++ tx_buf->skb = skb; ++ ++ tx_desc->own = 1; ++ ++ dma_wmb(); ++ ++ emac_dma_start_transmit(priv); ++ ++ /* update tx ring head */ ++ tx_ring->head = i; ++ ++ return 0; ++ ++dma_map_err: ++ dev_kfree_skb_any(skb); ++ priv->ndev->stats.tx_dropped++; ++ return 0; ++} ++ ++static u32 read_tx_stat_cntr(struct emac_priv *priv, u8 cnt) ++{ ++ u32 val, tmp; ++ ++ val = 0x8000 | cnt; ++ emac_wr(priv, MAC_TX_STATCTR_CONTROL, val); ++ val = emac_rd(priv, MAC_TX_STATCTR_CONTROL); ++ ++ if (readl_poll_timeout_atomic(priv->iobase + MAC_TX_STATCTR_CONTROL, ++ val, !(val & 0x8000), 100, 10000)) { ++ pr_err("%s timeout!!\n", __func__); ++ return -EINVAL; ++ } ++ ++ tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_HIGH); ++ val = tmp << 16; ++ tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_LOW); ++ val |= tmp; ++ ++ return val; ++} ++ ++static u32 read_rx_stat_cntr(struct emac_priv *priv, u8 cnt) ++{ ++ u32 val, tmp; ++ ++ val = 0x8000 | cnt; ++ emac_wr(priv, MAC_RX_STATCTR_CONTROL, val); ++ val = emac_rd(priv, MAC_RX_STATCTR_CONTROL); ++ ++ if (readl_poll_timeout_atomic(priv->iobase + MAC_RX_STATCTR_CONTROL, ++ val, !(val & 0x8000), 100, 10000)) { ++ pr_err("%s timeout!!\n", __func__); ++ return -EINVAL; ++ } ++ ++ tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_HIGH); ++ val = tmp << 16; ++ tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_LOW); ++ val |= tmp; ++ ++ return val; ++} ++ ++static void emac_mac_multicast_filter_clear(struct emac_priv *priv) ++{ ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0); ++} ++ ++static void emac_reset(struct emac_priv *priv) ++{ ++ if (!test_and_clear_bit(EMAC_RESET_REQUESTED, &priv->state)) ++ return; ++ if (test_bit(EMAC_DOWN, &priv->state)) ++ return; ++ ++ netdev_err(priv->ndev, "Reset controller.\n"); ++ ++ rtnl_lock(); ++ netif_trans_update(priv->ndev); ++ while (test_and_set_bit(EMAC_RESETING, &priv->state)) ++ usleep_range(1000, 2000); ++ set_bit(EMAC_DOWN, &priv->state); ++ dev_close(priv->ndev); ++ dev_open(priv->ndev, NULL); ++ clear_bit(EMAC_DOWN, &priv->state); ++ clear_bit(EMAC_RESETING, &priv->state); ++ rtnl_unlock(); ++} ++ ++static void emac_tx_timeout_task(struct work_struct *work) ++{ ++ struct emac_priv *priv = container_of(work, struct emac_priv, tx_timeout_task); ++ ++ emac_reset(priv); ++ clear_bit(EMAC_TASK_SCHED, &priv->state); ++} ++ ++static int clk_phase_rgmii_set(struct emac_priv *priv, bool is_tx) ++{ ++ u32 val; ++ ++ switch (priv->clk_tuning_way) { ++ case CLK_TUNING_BY_REG: ++ val = readl(priv->ctrl_reg); ++ if (is_tx) { ++ val &= ~RGMII_TX_PHASE_MASK; ++ val |= (priv->tx_clk_phase & 0x7) << RGMII_TX_PHASE_OFFSET; ++ } else { ++ val &= ~RGMII_RX_PHASE_MASK; ++ val |= (priv->rx_clk_phase & 0x7) << RGMII_RX_PHASE_OFFSET; ++ } ++ writel(val, priv->ctrl_reg); ++ break; ++ ++ case CLK_TUNING_BY_DLINE: ++ val = readl(priv->dline_reg); ++ if (is_tx) { ++ val &= ~EMAC_TX_DLINE_CODE_MASK; ++ val |= priv->tx_clk_phase << EMAC_TX_DLINE_CODE_OFFSET; ++ val |= EMAC_TX_DLINE_EN; ++ } else { ++ val &= ~EMAC_RX_DLINE_CODE_MASK; ++ val |= priv->rx_clk_phase << EMAC_RX_DLINE_CODE_OFFSET; ++ val |= EMAC_RX_DLINE_EN; ++ } ++ writel(val, priv->dline_reg); ++ break; ++ ++ default: ++ 
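emac_tx_mem_map() above places the linear part of the skb in buffer 0 of the packet's first descriptor and then packs two fragments per descriptor, starting with frag 0 in buffer 1 of that same first descriptor (the buf_idx = (f + 1) % 2 logic). A standalone sketch of just that index mapping; it models placement only, not the DMA mapping, ownership bits, or ring wrap-around.

#include <stdint.h>
#include <stdio.h>

/* For 0-based fragment f, compute which descriptor (offset from the packet's
 * first descriptor) and which of its two buffers the fragment occupies. */
static void frag_slot(uint32_t f, uint32_t *desc_off, uint32_t *buf_idx)
{
	*desc_off = (f + 1) / 2;
	*buf_idx = (f + 1) % 2;
}

int main(void)
{
	uint32_t f, d, b;

	for (f = 0; f < 5; f++) {
		frag_slot(f, &d, &b);
		printf("frag %u -> descriptor +%u, buffer %u\n", f, d, b);
	}
	return 0;
}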
pr_err("wrong clk tuning way:%d !!\n", priv->clk_tuning_way); ++ return -1; ++ } ++ pr_debug("%s tx phase:%d rx phase:%d\n", __func__, priv->tx_clk_phase, priv->rx_clk_phase); ++ ++ return 0; ++} ++ ++static int clk_phase_rmii_set(struct emac_priv *priv, bool is_tx) ++{ ++ u32 val; ++ ++ switch (priv->clk_tuning_way) { ++ case CLK_TUNING_BY_REG: ++ val = readl(priv->ctrl_reg); ++ if (is_tx) { ++ val &= ~RMII_TX_PHASE_MASK; ++ val |= (priv->tx_clk_phase & 0x7) << RMII_TX_PHASE_OFFSET; ++ } else { ++ val &= ~RMII_RX_PHASE_MASK; ++ val |= (priv->rx_clk_phase & 0x7) << RMII_RX_PHASE_OFFSET; ++ } ++ writel(val, priv->ctrl_reg); ++ break; ++ ++ case CLK_TUNING_BY_CLK_REVERT: ++ val = readl(priv->ctrl_reg); ++ if (is_tx) { ++ if (priv->tx_clk_phase == CLK_PHASE_REVERT) ++ val |= RMII_TX_CLK_SEL; ++ else ++ val &= ~RMII_TX_CLK_SEL; ++ } else { ++ if (priv->rx_clk_phase == CLK_PHASE_REVERT) ++ val |= RMII_RX_CLK_SEL; ++ else ++ val &= ~RMII_RX_CLK_SEL; ++ } ++ writel(val, priv->ctrl_reg); ++ break; ++ ++ default: ++ pr_err("wrong clk tuning way:%d !!\n", priv->clk_tuning_way); ++ return -1; ++ } ++ pr_debug("%s tx phase:%d rx phase:%d\n", __func__, priv->tx_clk_phase, priv->rx_clk_phase); ++ ++ return 0; ++} ++ ++static int rx_coal_param_set(struct emac_priv *priv) ++{ ++ u32 val; ++ ++ val = emac_rd(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL); ++ val &= ~MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MSK; ++ val |= (priv->rx_coal_frames & MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MSK); ++ val &= ~MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MSK; ++ val |= ((priv->rx_coal_timeout * AXI_CLK_CYCLES_PER_US) << ++ MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_OFST) & ++ MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MSK; ++ emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, val); ++ ++ return 0; ++} ++ ++static int clk_phase_set(struct emac_priv *priv, bool is_tx) ++{ ++ if (priv->clk_tuning_enable) { ++ if (emac_is_rmii(priv)) ++ clk_phase_rmii_set(priv, is_tx); ++ else ++ clk_phase_rgmii_set(priv, is_tx); ++ } ++ ++ return 0; ++} ++ ++static int emac_mii_reset(struct mii_bus *bus) ++{ ++ struct emac_priv *priv = bus->priv; ++ struct device *dev = &priv->pdev->dev; ++ int rst_gpio, ldo_gpio; ++ int active_state; ++ u32 delays[3] = {0}; ++ ++ if (dev->of_node) { ++ struct device_node *np = dev->of_node; ++ ++ if (!np) ++ return 0; ++ ++ ldo_gpio = of_get_named_gpio(np, "emac,ldo-gpio", 0); ++ if (ldo_gpio >= 0) { ++ if (gpio_request(ldo_gpio, "mdio-ldo")) ++ return 0; ++ ++ gpio_direction_output(ldo_gpio, 1); ++ } ++ ++ rst_gpio = of_get_named_gpio(np, "emac,reset-gpio", 0); ++ if (rst_gpio < 0) ++ return 0; ++ ++ active_state = of_property_read_bool(np, "emac,reset-active-low"); ++ of_property_read_u32_array(np, "emac,reset-delays-us", delays, 3); ++ ++ if (gpio_request(rst_gpio, "mdio-reset")) ++ return 0; ++ ++ gpio_direction_output(rst_gpio, active_state ? 1 : 0); ++ if (delays[0]) ++ msleep(DIV_ROUND_UP(delays[0], 1000)); ++ ++ gpio_set_value(rst_gpio, active_state ? 0 : 1); ++ if (delays[1]) ++ msleep(DIV_ROUND_UP(delays[1], 1000)); ++ ++ gpio_set_value(rst_gpio, active_state ? 
1 : 0); ++ if (delays[2]) ++ msleep(DIV_ROUND_UP(delays[2], 1000)); ++ } ++ return 0; ++} ++ ++static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum) ++{ ++ struct emac_priv *priv = bus->priv; ++ u32 cmd = 0; ++ u32 val; ++ ++ cmd |= phy_addr & 0x1F; ++ cmd |= (regnum & 0x1F) << 5; ++ cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE; ++ ++ emac_wr(priv, MAC_MDIO_DATA, 0x0); ++ emac_wr(priv, MAC_MDIO_CONTROL, cmd); ++ ++ if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, ++ val, !((val >> 15) & 0x1), 100, 10000)) ++ return -EBUSY; ++ ++ val = emac_rd(priv, MAC_MDIO_DATA); ++ return val; ++} ++ ++static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) ++{ ++ struct emac_priv *priv = bus->priv; ++ u32 cmd = 0; ++ u32 val; ++ ++ emac_wr(priv, MAC_MDIO_DATA, value); ++ ++ cmd |= phy_addr & 0x1F; ++ cmd |= (regnum & 0x1F) << 5; ++ cmd |= MREGBIT_START_MDIO_TRANS; ++ emac_wr(priv, MAC_MDIO_CONTROL, cmd); ++ ++ if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, ++ val, !((val >> 15) & 0x1), 100, 10000)) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++static void emac_adjust_link(struct net_device *dev) ++{ ++ struct phy_device *phydev = dev->phydev; ++ struct emac_priv *priv = netdev_priv(dev); ++ bool link_changed = false; ++ u32 ctrl; ++ ++ if (!phydev) ++ return; ++ ++ if (phydev->link) { ++ ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL); ++ ++ /* Now we make sure that we can be in full duplex mode ++ * If not, we operate in half-duplex mode. ++ */ ++ if (phydev->duplex != priv->duplex) { ++ link_changed = true; ++ ++ if (!phydev->duplex) ++ ctrl &= ~MREGBIT_FULL_DUPLEX_MODE; ++ else ++ ctrl |= MREGBIT_FULL_DUPLEX_MODE; ++ priv->duplex = phydev->duplex; ++ } ++ ++ if (phydev->speed != priv->speed) { ++ link_changed = true; ++ ++ ctrl &= ~MREGBIT_SPEED; ++ ++ switch (phydev->speed) { ++ case SPEED_1000: ++ ctrl |= MREGBIT_SPEED_1000M; ++ break; ++ ++ case SPEED_100: ++ ctrl |= MREGBIT_SPEED_100M; ++ break; ++ ++ case SPEED_10: ++ ctrl |= MREGBIT_SPEED_10M; ++ break; ++ ++ default: ++ pr_err("broken speed: %d\n", phydev->speed); ++ phydev->speed = SPEED_UNKNOWN; ++ break; ++ } ++ if (phydev->speed != SPEED_UNKNOWN) ++ priv->speed = phydev->speed; ++ } ++ ++ emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl); ++ ++ if (!priv->link) { ++ priv->link = true; ++ link_changed = true; ++ } ++ } else if (priv->link) { ++ priv->link = false; ++ link_changed = true; ++ priv->duplex = DUPLEX_UNKNOWN; ++ priv->speed = SPEED_UNKNOWN; ++ } ++ ++ if (link_changed) ++ phy_print_status(phydev); ++} ++ ++static int emac_phy_connect(struct net_device *dev) ++{ ++ struct phy_device *phydev; ++ struct device_node *np; ++ struct emac_priv *priv = netdev_priv(dev); ++ ++ np = of_parse_phandle(priv->pdev->dev.of_node, "phy-handle", 0); ++ if (!np && of_phy_is_fixed_link(priv->pdev->dev.of_node)) ++ np = of_node_get(priv->pdev->dev.of_node); ++ if (!np) ++ return -ENODEV; ++ ++ of_get_phy_mode(np, &priv->phy_interface); ++ pr_info("priv phy_interface = %d\n", priv->phy_interface); ++ ++ emac_phy_interface_config(priv); ++ ++ phydev = of_phy_connect(dev, np, &emac_adjust_link, 0, priv->phy_interface); ++ if (IS_ERR_OR_NULL(phydev)) { ++ pr_err("Could not attach to PHY\n"); ++ if (!phydev) ++ return -ENODEV; ++ return PTR_ERR(phydev); ++ } ++ ++ pr_info("%s: %s: attached to PHY (UID 0x%x) Link = %d\n", ++ __func__, dev->name, phydev->phy_id, phydev->link); ++ ++ /* Indicate that the MAC is responsible for PHY PM */ ++ phydev->mac_managed_pm = true; ++ dev->phydev = phydev; ++ ++ 
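emac_mii_read() and emac_mii_write() above compose a single MDIO command word: the PHY address occupies bits [4:0], the register number bits [9:5], and the remaining control bits come from the MREGBIT_* macros. A standalone sketch of that composition; the control bits are passed in rather than redefined, since their exact positions are only known through those macros.

#include <stdint.h>

/* Build an MDIO command word as the emac MDIO helpers do; 'ctrl_bits' stands
 * in for MREGBIT_START_MDIO_TRANS (plus MREGBIT_MDIO_READ_WRITE for reads). */
uint32_t mdio_cmd(uint32_t phy_addr, uint32_t regnum, uint32_t ctrl_bits)
{
	uint32_t cmd = 0;

	cmd |= phy_addr & 0x1Fu;      /* bits [4:0] */
	cmd |= (regnum & 0x1Fu) << 5; /* bits [9:5] */
	cmd |= ctrl_bits;

	return cmd;
}

Both helpers then poll MAC_MDIO_CONTROL until bit 15 clears, which is what the (val >> 15) & 0x1 test inside readl_poll_timeout() checks.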
clk_phase_set(priv, TX_PHASE); ++ clk_phase_set(priv, RX_PHASE); ++ return 0; ++} ++ ++static int emac_mdio_init(struct emac_priv *priv) ++{ ++ struct device_node *mii_np; ++ struct device *dev = &priv->pdev->dev; ++ int ret; ++ ++ mii_np = of_get_child_by_name(dev->of_node, "mdio-bus"); ++ if (!mii_np) { ++ if (of_phy_is_fixed_link(dev->of_node)) { ++ if ((of_phy_register_fixed_link(dev->of_node) < 0)) ++ return -ENODEV; ++ dev_dbg(dev, "find fixed link\n"); ++ return 0; ++ } ++ ++ dev_err(dev, "no %s child node found", "mdio-bus"); ++ return -ENODEV; ++ } ++ ++ if (!of_device_is_available(mii_np)) { ++ ret = -ENODEV; ++ goto err_put_node; ++ } ++ ++ priv->mii = devm_mdiobus_alloc(dev); ++ if (!priv->mii) { ++ ret = -ENOMEM; ++ goto err_put_node; ++ } ++ priv->mii->priv = priv; ++ priv->mii->name = "emac mii"; ++ priv->mii->reset = emac_mii_reset; ++ priv->mii->read = emac_mii_read; ++ priv->mii->write = emac_mii_write; ++ snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name); ++ priv->mii->parent = dev; ++ priv->mii->phy_mask = 0xffffffff; ++ ret = of_mdiobus_register(priv->mii, mii_np); ++ if (ret) { ++ dev_err(dev, "Failed to register mdio bus.\n"); ++ goto err_put_node; ++ } ++ ++ priv->phy = phy_find_first(priv->mii); ++ if (!priv->phy) { ++ dev_err(dev, "no PHY found\n"); ++ return -ENODEV; ++ } ++ ++err_put_node: ++ of_node_put(mii_np); ++ return ret; ++} ++ ++static int emac_mdio_deinit(struct emac_priv *priv) ++{ ++ if (!priv->mii) ++ return 0; ++ ++ mdiobus_unregister(priv->mii); ++ return 0; ++} ++ ++static void emac_stats_update(struct emac_priv *priv) ++{ ++ struct emac_hw_stats *hwstats = priv->hw_stats; ++ int i; ++ u32 *p; ++ ++ p = (u32 *)(hwstats); ++ ++ for (i = 0; i < MAX_TX_STATS_NUM; i++) ++ *(p + i) = read_tx_stat_cntr(priv, i); ++ ++ p = (u32 *)hwstats + MAX_TX_STATS_NUM; ++ ++ for (i = 0; i < MAX_RX_STATS_NUM; i++) ++ *(p + i) = read_rx_stat_cntr(priv, i); ++} ++ ++static int emac_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) ++{ ++ if (!ndev->phydev) ++ return -ENODEV; ++ ++ phy_ethtool_ksettings_get(ndev->phydev, cmd); ++ return 0; ++} ++ ++static int emac_set_link_ksettings(struct net_device *ndev, ++ const struct ethtool_link_ksettings *cmd) ++{ ++ if (!ndev->phydev) ++ return -ENODEV; ++ ++ return phy_ethtool_ksettings_set(ndev->phydev, cmd); ++} ++ ++static void emac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) ++{ ++ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); ++ info->n_stats = ARRAY_SIZE(emac_ethtool_stats); ++} ++ ++static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data) ++{ ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) { ++ memcpy(data, emac_ethtool_stats[i].str, ETH_GSTRING_LEN); ++ data += ETH_GSTRING_LEN; ++ } ++ break; ++ } ++} ++ ++static void emac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) ++{ ++ wol->supported = 0; ++ wol->wolopts = 0; ++ ++ if (dev->phydev) ++ phy_ethtool_get_wol(dev->phydev, wol); ++} ++ ++static int emac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) ++{ ++ struct emac_priv *priv = netdev_priv(dev); ++ struct device *kdev = &priv->pdev->dev; ++ int ret; ++ ++ if (!device_can_wakeup(kdev)) ++ return -EOPNOTSUPP; ++ ++ /* Try Wake-on-LAN from the PHY first */ ++ if (dev->phydev) { ++ ret = phy_ethtool_set_wol(dev->phydev, wol); ++ if (!ret) ++ device_set_wakeup_enable(kdev, !!wol->wolopts); ++ } ++ ++ return ret; ++} ++ ++static 
int emac_get_sset_count(struct net_device *dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(emac_ethtool_stats); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static void emac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) ++{ ++ struct emac_priv *priv = netdev_priv(dev); ++ struct emac_hw_stats *hwstats = priv->hw_stats; ++ u32 *data_src; ++ u64 *data_dst; ++ int i; ++ ++ if (netif_running(dev) && netif_device_present(dev)) { ++ if (spin_trylock_bh(&hwstats->stats_lock)) { ++ emac_stats_update(priv); ++ spin_unlock_bh(&hwstats->stats_lock); ++ } ++ } ++ ++ data_dst = data; ++ ++ for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) { ++ data_src = (u32 *)hwstats + emac_ethtool_stats[i].offset; ++ *data_dst++ = (u64)(*data_src); ++ } ++} ++ ++static void emac_ethtool_get_regs(struct net_device *dev, ++ struct ethtool_regs *regs, void *space) ++{ ++ struct emac_priv *priv = netdev_priv(dev); ++ u32 *reg_space = (u32 *)space; ++ void __iomem *base = priv->iobase; ++ int i; ++ ++ regs->version = 1; ++ ++ memset(reg_space, 0x0, EMAC_REG_SPACE_SIZE); ++ ++ for (i = 0; i < EMAC_DMA_REG_CNT; i++) ++ reg_space[i] = readl(base + DMA_CONFIGURATION + i * 4); ++ ++ for (i = 0; i < EMAC_MAC_REG_CNT; i++) ++ reg_space[i + EMAC_DMA_REG_CNT] = readl(base + MAC_GLOBAL_CONTROL + i * 4); ++} ++ ++static int emac_ethtool_get_regs_len(struct net_device *dev) ++{ ++ return EMAC_REG_SPACE_SIZE; ++} ++ ++static int emac_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) ++{ ++ struct emac_priv *priv = netdev_priv(dev); ++ ++ if (priv->ptp_support) { ++ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | ++ SOF_TIMESTAMPING_TX_HARDWARE | ++ SOF_TIMESTAMPING_RX_SOFTWARE | ++ SOF_TIMESTAMPING_RX_HARDWARE | ++ SOF_TIMESTAMPING_SOFTWARE | ++ SOF_TIMESTAMPING_RAW_HARDWARE; ++ ++ if (priv->ptp_clock) ++ info->phc_index = ptp_clock_index(priv->ptp_clock); ++ ++ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); ++ ++ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | ++ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | ++ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | ++ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | ++ (1 << HWTSTAMP_FILTER_ALL)); ++ return 0; ++ } else { ++ return ethtool_op_get_ts_info(dev, info); ++ } ++} ++ ++static int emac_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *c, ++ struct kernel_ethtool_coalesce *kernel_coal, ++ struct netlink_ext_ack *extack) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ ++ c->rx_coalesce_usecs = priv->rx_coal_timeout; ++ c->rx_max_coalesced_frames = priv->rx_coal_frames; ++ ++ c->rx_max_coalesced_frames_low = MIN_RX_COAL_FRAMES; ++ c->rx_max_coalesced_frames_high = MAX_RX_COAL_FRAMES; ++ ++ c->rx_coalesce_usecs_low = MIN_RX_COAL_TIMEOUT; ++ c->rx_coalesce_usecs_high = MAX_RX_COAL_TIMEOUT; ++ ++ return 0; ++} ++ ++static int emac_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c, ++ struct kernel_ethtool_coalesce *kernel_coal, ++ struct netlink_ext_ack *extack) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ ++ if (c->rx_max_coalesced_frames > MAX_RX_COAL_FRAMES || ++ c->rx_max_coalesced_frames < MIN_RX_COAL_FRAMES) ++ return -EINVAL; ++ ++ if (c->rx_coalesce_usecs > MAX_RX_COAL_TIMEOUT || 
++ c->rx_coalesce_usecs < MIN_RX_COAL_TIMEOUT) ++ return -EINVAL; ++ ++ /* Only update hardware if a change occurred. */ ++ if (priv->rx_coal_frames == c->rx_max_coalesced_frames && ++ priv->rx_coal_timeout == c->rx_coalesce_usecs) ++ return 0; ++ ++ priv->rx_coal_frames = c->rx_max_coalesced_frames; ++ priv->rx_coal_timeout = c->rx_coalesce_usecs; ++ ++ rx_coal_param_set(priv); ++ ++ return 0; ++} ++ ++static const struct ethtool_ops emac_ethtool_ops = { ++ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES, ++ .get_link_ksettings = emac_get_link_ksettings, ++ .set_link_ksettings = emac_set_link_ksettings, ++ .get_drvinfo = emac_get_drvinfo, ++ .nway_reset = phy_ethtool_nway_reset, ++ .get_link = ethtool_op_get_link, ++ .get_strings = emac_get_strings, ++ .get_wol = emac_get_wol, ++ .set_wol = emac_set_wol, ++ .get_sset_count = emac_get_sset_count, ++ .get_ethtool_stats = emac_get_ethtool_stats, ++ .get_regs = emac_ethtool_get_regs, ++ .get_regs_len = emac_ethtool_get_regs_len, ++ .get_ts_info = emac_get_ts_info, ++ .get_coalesce = emac_get_coalesce, ++ .set_coalesce = emac_set_coalesce, ++}; ++ ++static int emac_up(struct emac_priv *priv) ++{ ++ struct net_device *ndev = priv->ndev; ++ int ret; ++ u32 val = 0; ++ ++ ret = emac_phy_connect(ndev); ++ if (ret) { ++ pr_err("%s phy_connet failed\n", __func__); ++ return ret; ++ } ++ ++ /* init hardware */ ++ emac_init_hw(priv); ++ emac_ptp_init(priv); ++ emac_set_mac_addr(priv, ndev->dev_addr); ++ /* configure transmit unit */ ++ emac_configure_tx(priv); ++ /* configure rx unit */ ++ emac_configure_rx(priv); ++ /* allocate buffers for receive descriptors */ ++ emac_alloc_rx_desc_buffers(priv); ++ phy_set_asym_pause(ndev->phydev, true, false); ++ if (ndev->phydev) ++ phy_start(ndev->phydev); ++ ++ /* allocates interrupt resources and ++ * enables the interrupt line and IRQ handling ++ */ ++ ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED, ndev->name, ndev); ++ if (ret) { ++ pr_err("request_irq failed\n"); ++ goto request_irq_failed; ++ } ++ ++ /* enable mac interrupt */ ++ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000); ++ ++ val = MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE | ++ MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE | ++ MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE | ++ MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE | ++ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE; ++ emac_wr(priv, DMA_INTERRUPT_ENABLE, val); ++ ++ napi_enable(&priv->napi); ++ netif_start_queue(ndev); ++ return 0; ++ ++request_irq_failed: ++ emac_reset_hw(priv); ++ if (ndev->phydev) { ++ phy_stop(ndev->phydev); ++ phy_disconnect(ndev->phydev); ++ } ++ return ret; ++} ++ ++static int emac_open(struct net_device *ndev) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ int ret; ++ ++ ret = emac_alloc_tx_resources(priv); ++ if (ret) { ++ pr_err("Error in setting up the Tx resources\n"); ++ goto emac_alloc_tx_resource_fail; ++ } ++ ++ ret = emac_alloc_rx_resources(priv); ++ if (ret) { ++ pr_err("Error in setting up the Rx resources\n"); ++ goto emac_alloc_rx_resource_fail; ++ } ++ ++ ret = emac_up(priv); ++ if (ret) { ++ pr_err("Error in making the net intrface up\n"); ++ goto emac_up_fail; ++ } ++ return 0; ++ ++emac_up_fail: ++ emac_free_rx_resources(priv); ++emac_alloc_rx_resource_fail: ++ emac_free_tx_resources(priv); ++emac_alloc_tx_resource_fail: ++ return ret; ++} ++ ++static int emac_close(struct net_device *ndev) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ ++ emac_down(priv); ++ emac_free_tx_resources(priv); ++ 
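emac_set_coalesce() above bounds the ethtool rx-usecs/rx-frames values and hands them to rx_coal_param_set(), which expresses the timeout in AXI clock cycles (rx_coal_timeout * AXI_CLK_CYCLES_PER_US) before shifting it into DMA_RECEIVE_IRQ_MITIGATION_CTRL. A standalone sketch of that conversion with a range check; the EXAMPLE_* limits are placeholders, not the driver's MIN_/MAX_RX_COAL_TIMEOUT values.

#include <errno.h>
#include <stdint.h>

#define EXAMPLE_MIN_RX_COAL_USECS 1u   /* placeholder bound */
#define EXAMPLE_MAX_RX_COAL_USECS 255u /* placeholder bound */

/* Convert an ethtool rx-usecs setting into the cycle count written to the
 * mitigation register, given the AXI clock's cycles per microsecond. */
int rx_coal_usecs_to_cycles(uint32_t usecs, uint32_t axi_cycles_per_us,
			    uint32_t *cycles)
{
	if (usecs < EXAMPLE_MIN_RX_COAL_USECS || usecs > EXAMPLE_MAX_RX_COAL_USECS)
		return -EINVAL;

	*cycles = usecs * axi_cycles_per_us;
	return 0;
}

emac_init_hw() applies the same microseconds-to-cycles conversion when it programs the initial mitigation settings.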
emac_free_rx_resources(priv); ++ ++ return 0; ++} ++ ++static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ int nfrags = skb_shinfo(skb)->nr_frags; ++ ++ if (unlikely(emac_tx_avail(priv) < nfrags + 1)) { ++ if (!netif_queue_stopped(ndev)) { ++ netif_stop_queue(ndev); ++ pr_err_ratelimited("tx ring full, stop tx queue\n"); ++ } ++ return NETDEV_TX_BUSY; ++ } ++ ++ emac_tx_mem_map(priv, skb, MAX_DATA_LEN_TX_DES, nfrags); ++ ++ ndev->stats.tx_packets++; ++ ndev->stats.tx_bytes += skb->len; ++ ++ /* Make sure there is space in the ring for the next send. */ ++ if (unlikely(emac_tx_avail(priv) <= (MAX_SKB_FRAGS + 2))) ++ netif_stop_queue(ndev); ++ ++ return NETDEV_TX_OK; ++} ++ ++static int emac_set_mac_address(struct net_device *ndev, void *addr) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ int ret = 0; ++ ++ ret = eth_mac_addr(ndev, addr); ++ if (ret) ++ return ret; ++ ++ /* if nic not running, we just save addr ++ * it will be set during device_open; ++ * otherwise directly change hw mac setting. ++ */ ++ if (netif_running(ndev)) ++ emac_set_mac_addr(priv, ndev->dev_addr); ++ ++ return ret; ++} ++ ++static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) ++{ ++ int ret = -EOPNOTSUPP; ++ ++ if (!netif_running(ndev)) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case SIOCGMIIPHY: ++ case SIOCGMIIREG: ++ case SIOCSMIIREG: ++ if (!ndev->phydev) ++ return -EINVAL; ++ ret = phy_mii_ioctl(ndev->phydev, rq, cmd); ++ break; ++ case SIOCSHWTSTAMP: ++ ret = emac_hwtstamp_ioctl(ndev, rq); ++ break; ++ default: ++ break; ++ } ++ ++ return ret; ++} ++ ++static int emac_change_mtu(struct net_device *ndev, int mtu) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ u32 frame_len; ++ ++ if (netif_running(ndev)) { ++ pr_err("must be stopped to change its MTU\n"); ++ return -EBUSY; ++ } ++ ++ frame_len = mtu + ETHERNET_HEADER_SIZE + ETHERNET_FCS_SIZE; ++ ++ if (frame_len < MINIMUM_ETHERNET_FRAME_SIZE || frame_len > EMAC_RX_BUF_4K) { ++ pr_err("Invalid MTU setting\n"); ++ return -EINVAL; ++ } ++ ++ if (frame_len <= EMAC_DEFAULT_BUFSIZE) ++ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE; ++ else if (frame_len <= EMAC_RX_BUF_2K) ++ priv->dma_buf_sz = EMAC_RX_BUF_2K; ++ else ++ priv->dma_buf_sz = EMAC_RX_BUF_4K; ++ ++ ndev->mtu = mtu; ++ ++ return 0; ++} ++ ++static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ ++ netdev_info(ndev, "TX timeout\n"); ++ ++ netif_carrier_off(priv->ndev); ++ set_bit(EMAC_RESET_REQUESTED, &priv->state); ++ ++ if (!test_bit(EMAC_DOWN, &priv->state) && !test_and_set_bit(EMAC_TASK_SCHED, &priv->state)) ++ schedule_work(&priv->tx_timeout_task); ++} ++ ++static void emac_rx_mode_set(struct net_device *ndev) ++{ ++ struct emac_priv *priv = netdev_priv(ndev); ++ struct netdev_hw_addr *ha; ++ u32 mc_filter[4] = {0}; ++ u32 val; ++ u32 crc32, bit, reg, hash; ++ ++ val = emac_rd(priv, MAC_ADDRESS_CONTROL); ++ val &= ~MREGBIT_PROMISCUOUS_MODE; ++ if (ndev->flags & IFF_PROMISC) { ++ /* enable promisc mode */ ++ val |= MREGBIT_PROMISCUOUS_MODE; ++ } else if ((ndev->flags & IFF_ALLMULTI) || (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) { ++ /* Pass all multi */ ++ /* Set the 64 bits of the HASH tab. 
To be updated if taller ++ * hash table is used ++ */ ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff); ++ } else if (!netdev_mc_empty(ndev)) { ++ emac_mac_multicast_filter_clear(priv); ++ netdev_for_each_mc_addr(ha, ndev) { ++ /* Calculate the CRC of the MAC address */ ++ crc32 = ether_crc(ETH_ALEN, ha->addr); ++ ++ /* The HASH Table is an array of 4 16-bit registers. It is ++ * treated like an array of 64 bits (BitArray[hash_value]). ++ * Use the upper 6 bits of the above CRC as the hash value. ++ */ ++ hash = (crc32 >> 26) & 0x3F; ++ reg = hash / 16; ++ bit = hash % 16; ++ mc_filter[reg] |= BIT(bit); ++ pr_debug("%s %pM crc32:0x%x hash:0x%x reg:%d bit:%d\n", ++ __func__, ha->addr, crc32, hash, reg, bit); ++ } ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]); ++ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]); ++ } ++ emac_wr(priv, MAC_ADDRESS_CONTROL, val); ++} ++ ++static const struct net_device_ops emac_netdev_ops = { ++ .ndo_open = emac_open, ++ .ndo_stop = emac_close, ++ .ndo_start_xmit = emac_start_xmit, ++ .ndo_set_mac_address = emac_set_mac_address, ++ .ndo_do_ioctl = emac_ioctl, ++ .ndo_eth_ioctl = emac_ioctl, ++ .ndo_change_mtu = emac_change_mtu, ++ .ndo_tx_timeout = emac_tx_timeout, ++ .ndo_set_rx_mode = emac_rx_mode_set, ++}; ++ ++static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct resource *res; ++ u8 mac_addr[ETH_ALEN] = {0}; ++ u32 tx_phase, rx_phase; ++ u32 ctrl_reg; ++ int ret; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ priv->iobase = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->iobase)) { ++ dev_err(&pdev->dev, "failed to io remap res reg 0\n"); ++ return -ENOMEM; ++ } ++ ++ if (of_property_read_u32(np, "k1,apmu-base-reg", &priv->apmu_base)) { ++ dev_err(&pdev->dev, "failed to io delay line configuration register\n"); ++ return -ENOMEM; ++ } ++ ++ priv->irq = irq_of_parse_and_map(np, 0); ++ if (!priv->irq) ++ return -ENXIO; ++ ++ if (of_property_read_u32(np, "ctrl-reg", &ctrl_reg)) { ++ dev_err(&pdev->dev, "cannot find ctrl register in device tree\n"); ++ return -EINVAL; ++ } ++ ++ priv->ctrl_reg = ioremap(priv->apmu_base + ctrl_reg, 4); ++ ++ if (of_property_read_u32(np, "tx-threshold", &priv->tx_threshold)) { ++ priv->tx_threshold = DEFAULT_TX_THRESHOLD; ++ dev_dbg(&pdev->dev, "%s tx_threshold using default value:%d\n", ++ __func__, priv->tx_threshold); ++ } ++ ++ if (of_property_read_u32(np, "rx-threshold", &priv->rx_threshold)) { ++ priv->rx_threshold = DEFAULT_RX_THRESHOLD; ++ dev_dbg(&pdev->dev, "%s rx_threshold using default value:%d\n", ++ __func__, priv->rx_threshold); ++ } ++ ++ if (of_property_read_u32(np, "tx-ring-num", &priv->tx_ring_num)) { ++ priv->tx_ring_num = DEFAULT_TX_RING_NUM; ++ dev_dbg(&pdev->dev, "%s tx_ring_num using default value:%d\n", ++ __func__, priv->tx_ring_num); ++ } ++ ++ if (of_property_read_u32(np, "rx-ring-num", &priv->rx_ring_num)) { ++ priv->rx_ring_num = DEFAULT_RX_RING_NUM; ++ dev_dbg(&pdev->dev, "%s rx_ring_num using default value:%d\n", ++ __func__, priv->rx_ring_num); ++ } ++ ++ if (of_property_read_u32(np, "dma-burst-len", &priv->dma_burst_len)) { ++ priv->dma_burst_len = DEFAULT_DMA_BURST_LEN; ++ 
dev_dbg(&pdev->dev, "%s dma_burst_len using default value:%d\n", ++ __func__, priv->dma_burst_len); ++ } else { ++ if (priv->dma_burst_len <= 0 || priv->dma_burst_len > 7) { ++ dev_err(&pdev->dev, "%s burst len illegal, use default vallue:%d\n", ++ __func__, DEFAULT_DMA_BURST_LEN); ++ priv->dma_burst_len = DEFAULT_DMA_BURST_LEN; ++ } ++ } ++ ++ if (of_property_read_u32(np, "rx_coal_frames", &priv->rx_coal_frames)) { ++ priv->rx_coal_frames = DEFAULT_RX_COAL_FRAMES; ++ dev_dbg(&pdev->dev, "%s rx_coal_frames using default value:%d\n", ++ __func__, priv->rx_coal_frames); ++ } ++ ++ if (of_property_read_u32(np, "rx_coal_timeout", &priv->rx_coal_timeout)) { ++ priv->rx_coal_timeout = DEFAULT_RX_COAL_TIMEOUT; ++ dev_dbg(&pdev->dev, "%s rx_coal_timeout using default value:%d\n", ++ __func__, priv->rx_coal_timeout); ++ } ++ ++ if (of_property_read_bool(np, "ref-clock-from-phy")) { ++ priv->ref_clk_frm_soc = 0; ++ dev_dbg(&pdev->dev, "%s ref clock from external phy\n", __func__); ++ } else { ++ priv->ref_clk_frm_soc = 1; ++ } ++ ++ ret = of_get_mac_address(np, mac_addr); ++ if (ret) { ++ if (ret == -EPROBE_DEFER) ++ return ret; ++ ++ dev_info(&pdev->dev, "Using random mac address\n"); ++ eth_hw_addr_random(priv->ndev); ++ } else { ++ eth_hw_addr_set(priv->ndev, mac_addr); ++ } ++ ++ priv->ptp_support = of_property_read_bool(np, "ptp-support"); ++ if (priv->ptp_support) { ++ dev_dbg(&pdev->dev, "EMAC support IEEE1588 PTP Protocol\n"); ++ if (of_property_read_u32(np, "ptp-clk-rate", &priv->ptp_clk_rate)) ++ priv->ptp_clk_rate = 20000000; ++ } ++ priv->clk_tuning_enable = of_property_read_bool(np, "clk-tuning-enable"); ++ if (priv->clk_tuning_enable) { ++ if (of_property_read_bool(np, "clk-tuning-by-reg")) { ++ priv->clk_tuning_way = CLK_TUNING_BY_REG; ++ } else if (of_property_read_bool(np, "clk-tuning-by-clk-revert")) { ++ priv->clk_tuning_way = CLK_TUNING_BY_CLK_REVERT; ++ } else if (of_property_read_bool(np, "clk-tuning-by-delayline")) { ++ priv->clk_tuning_way = CLK_TUNING_BY_DLINE; ++ if (of_property_read_u32(np, "dline-reg", &ctrl_reg)) { ++ dev_err(&pdev->dev, "Delayline register not found in dts\n"); ++ return -EINVAL; ++ } ++ priv->dline_reg = ioremap(priv->apmu_base + ctrl_reg, 4); ++ } else { ++ priv->clk_tuning_way = CLK_TUNING_BY_REG; ++ } ++ ++ if (of_property_read_u32(np, "tx-phase", &tx_phase)) ++ priv->tx_clk_phase = TXCLK_PHASE_DEFAULT; ++ else ++ priv->tx_clk_phase = tx_phase; ++ ++ if (of_property_read_u32(np, "rx-phase", &rx_phase)) ++ priv->rx_clk_phase = RXCLK_PHASE_DEFAULT; ++ else ++ priv->rx_clk_phase = rx_phase; ++ } ++ ++ return 0; ++} ++ ++static int emac_sw_init(struct emac_priv *priv) ++{ ++ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE; ++ ++ priv->tx_ring.total_cnt = priv->tx_ring_num; ++ priv->rx_ring.total_cnt = priv->rx_ring_num; ++ ++ spin_lock_init(&priv->hw_stats->stats_lock); ++ ++ INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task); ++ ++ priv->tx_coal_frames = EMAC_TX_FRAMES; ++ priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT; ++ ++ timer_setup(&priv->txtimer, emac_tx_coal_timer, 0); ++ ++ return 0; ++} ++ ++static int emac_probe(struct platform_device *pdev) ++{ ++ struct emac_priv *priv; ++ struct net_device *ndev = NULL; ++ int ret; ++ ++ ndev = alloc_etherdev(sizeof(struct emac_priv)); ++ if (!ndev) ++ return -ENOMEM; ++ ++ ndev->hw_features = NETIF_F_SG; ++ ndev->features |= ndev->hw_features; ++ priv = netdev_priv(ndev); ++ priv->ndev = ndev; ++ priv->pdev = pdev; ++ platform_set_drvdata(pdev, priv); ++ priv->hw_stats = devm_kzalloc(&pdev->dev, 
sizeof(*priv->hw_stats), GFP_KERNEL); ++ if (!priv->hw_stats) { ++ dev_err(&pdev->dev, "failed to allocate counter memory\n"); ++ ret = -ENOMEM; ++ goto err_netdev; ++ } ++ ++ ret = emac_config_dt(pdev, priv); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to config dt\n"); ++ goto err_netdev; ++ } ++ ++ ndev->watchdog_timeo = 5 * HZ; ++ ndev->base_addr = (unsigned long)priv->iobase; ++ ndev->irq = priv->irq; ++ ++ ndev->ethtool_ops = &emac_ethtool_ops; ++ ndev->netdev_ops = &emac_netdev_ops; ++ ++ priv->mac_clk = devm_clk_get(&pdev->dev, "emac-clk"); ++ if (IS_ERR(priv->mac_clk)) { ++ dev_err(&pdev->dev, "emac clock not found.\n"); ++ ret = PTR_ERR(priv->mac_clk); ++ goto err_netdev; ++ } ++ ++ ret = clk_prepare_enable(priv->mac_clk); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to enable emac clock: %d\n", ret); ++ goto err_netdev; ++ } ++ ++ if (priv->ref_clk_frm_soc) { ++ priv->phy_clk = devm_clk_get(&pdev->dev, "phy-clk"); ++ if (IS_ERR(priv->phy_clk)) { ++ dev_err(&pdev->dev, "phy clock not found.\n"); ++ ret = PTR_ERR(priv->phy_clk); ++ goto mac_clk_disable; ++ } ++ ++ ret = clk_prepare_enable(priv->phy_clk); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to enable phy clock: %d\n", ret); ++ goto mac_clk_disable; ++ } ++ } ++ if (priv->ptp_support) { ++ priv->ptp_clk = devm_clk_get(&pdev->dev, "ptp-clk"); ++ if (IS_ERR(priv->ptp_clk)) { ++ dev_err(&pdev->dev, "ptp clock not found.\n"); ++ ret = PTR_ERR(priv->ptp_clk); ++ goto phy_clk_disable; ++ } ++ } ++ ++ priv->reset = devm_reset_control_get_optional(&pdev->dev, NULL); ++ if (IS_ERR(priv->reset)) { ++ dev_err(&pdev->dev, "Failed to get emac's resets\n"); ++ goto ptp_clk_disable; ++ } ++ reset_control_deassert(priv->reset); ++ ++ emac_sw_init(priv); ++ ++ ret = emac_mdio_init(priv); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to init mdio.\n"); ++ goto reset_assert; ++ } ++ ++ SET_NETDEV_DEV(ndev, &pdev->dev); ++ ++ ret = register_netdev(ndev); ++ if (ret) { ++ pr_err("register_netdev failed\n"); ++ goto err_mdio_deinit; ++ } ++ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ ++ netif_napi_add(ndev, &priv->napi, emac_rx_poll); ++ ++ return 0; ++ ++err_mdio_deinit: ++ emac_mdio_deinit(priv); ++reset_assert: ++ reset_control_assert(priv->reset); ++ptp_clk_disable: ++ if (priv->ptp_support) ++ clk_disable_unprepare(priv->ptp_clk); ++phy_clk_disable: ++ if (priv->ref_clk_frm_soc) ++ clk_disable_unprepare(priv->phy_clk); ++ del_timer_sync(&priv->txtimer); ++mac_clk_disable: ++ clk_disable_unprepare(priv->mac_clk); ++err_netdev: ++ free_netdev(ndev); ++ return ret; ++} ++ ++static int emac_remove(struct platform_device *pdev) ++{ ++ struct emac_priv *priv = platform_get_drvdata(pdev); ++ ++ unregister_netdev(priv->ndev); ++ emac_reset_hw(priv); ++ free_netdev(priv->ndev); ++ emac_mdio_deinit(priv); ++ reset_control_assert(priv->reset); ++ clk_disable_unprepare(priv->mac_clk); ++ if (priv->ref_clk_frm_soc) ++ clk_disable_unprepare(priv->phy_clk); ++ return 0; ++} ++ ++static const struct of_device_id emac_of_match[] = { ++ { .compatible = "spacemit,k1-emac" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, emac_of_match); ++ ++static struct platform_driver emac_driver = { ++ .probe = emac_probe, ++ .remove = emac_remove, ++ .driver = { ++ .name = DRIVER_NAME, ++ .of_match_table = of_match_ptr(emac_of_match), ++ }, ++}; ++ ++module_platform_driver(emac_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Ethernet driver for Spacemit k1 Emac"); ++MODULE_ALIAS("platform:spacemit_eth"); +diff --git 
a/drivers/net/ethernet/spacemit/k1-emac.h b/drivers/net/ethernet/spacemit/k1-emac.h +new file mode 100644 +index 000000000000..4217880f1be5 +--- /dev/null ++++ b/drivers/net/ethernet/spacemit/k1-emac.h +@@ -0,0 +1,727 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * spacemit k1 emac driver ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++#ifndef _K1_EMAC_H_ ++#define _K1_EMAC_H_ ++#include ++#include ++ ++#define PHY_INTF_RGMII BIT(2) ++ ++/* only valid for rmii mode ++ * 0: ref clock from external phy ++ * 1: ref clock from soc ++ */ ++#define REF_CLK_SEL BIT(3) ++ ++/* emac function clock select ++ * 0: 208M ++ * 1: 312M ++ */ ++#define FUNC_CLK_SEL BIT(4) ++ ++/* only valid for rmii, invert tx clk */ ++#define RMII_TX_CLK_SEL BIT(6) ++ ++/* only valid for rmii, invert rx clk */ ++#define RMII_RX_CLK_SEL BIT(7) ++ ++/* only valid for rgmiii ++ * 0: tx clk from rx clk ++ * 1: tx clk from soc ++ */ ++#define RGMII_TX_CLK_SEL BIT(8) ++ ++#define PHY_IRQ_EN BIT(12) ++#define AXI_SINGLE_ID BIT(13) ++ ++#define RMII_TX_PHASE_OFFSET (16) ++#define RMII_TX_PHASE_MASK GENMASK(18, 16) ++#define RMII_RX_PHASE_OFFSET (20) ++#define RMII_RX_PHASE_MASK GENMASK(22, 20) ++ ++#define RGMII_TX_PHASE_OFFSET (24) ++#define RGMII_TX_PHASE_MASK GENMASK(26, 24) ++#define RGMII_RX_PHASE_OFFSET (28) ++#define RGMII_RX_PHASE_MASK GENMASK(30, 28) ++ ++#define EMAC_RX_DLINE_EN BIT(0) ++#define EMAC_RX_DLINE_STEP_OFFSET (4) ++#define EMAC_RX_DLINE_STEP_MASK GENMASK(5, 4) ++#define EMAC_RX_DLINE_CODE_OFFSET (8) ++#define EMAC_RX_DLINE_CODE_MASK GENMASK(15, 8) ++ ++#define EMAC_TX_DLINE_EN BIT(16) ++#define EMAC_TX_DLINE_STEP_OFFSET (20) ++#define EMAC_TX_DLINE_STEP_MASK GENMASK(21, 20) ++#define EMAC_TX_DLINE_CODE_OFFSET (24) ++#define EMAC_TX_DLINE_CODE_MASK GENMASK(31, 24) ++ ++/* DMA register set */ ++#define DMA_CONFIGURATION 0x0000 ++#define DMA_CONTROL 0x0004 ++#define DMA_STATUS_IRQ 0x0008 ++#define DMA_INTERRUPT_ENABLE 0x000C ++ ++#define DMA_TRANSMIT_AUTO_POLL_COUNTER 0x0010 ++#define DMA_TRANSMIT_POLL_DEMAND 0x0014 ++#define DMA_RECEIVE_POLL_DEMAND 0x0018 ++ ++#define DMA_TRANSMIT_BASE_ADDRESS 0x001C ++#define DMA_RECEIVE_BASE_ADDRESS 0x0020 ++#define DMA_MISSED_FRAME_COUNTER 0x0024 ++#define DMA_STOP_FLUSH_COUNTER 0x0028 ++ ++#define DMA_RECEIVE_IRQ_MITIGATION_CTRL 0x002C ++ ++#define DMA_CURRENT_TRANSMIT_DESCRIPTOR_POINTER 0x0030 ++#define DMA_CURRENT_TRANSMIT_BUFFER_POINTER 0x0034 ++#define DMA_CURRENT_RECEIVE_DESCRIPTOR_POINTER 0x0038 ++#define DMA_CURRENT_RECEIVE_BUFFER_POINTER 0x003C ++ ++/* MAC Register set */ ++#define MAC_GLOBAL_CONTROL 0x0100 ++#define MAC_TRANSMIT_CONTROL 0x0104 ++#define MAC_RECEIVE_CONTROL 0x0108 ++#define MAC_MAXIMUM_FRAME_SIZE 0x010C ++#define MAC_TRANSMIT_JABBER_SIZE 0x0110 ++#define MAC_RECEIVE_JABBER_SIZE 0x0114 ++#define MAC_ADDRESS_CONTROL 0x0118 ++#define MAC_MDIO_CLK_DIV 0x011C ++#define MAC_ADDRESS1_HIGH 0x0120 ++#define MAC_ADDRESS1_MED 0x0124 ++#define MAC_ADDRESS1_LOW 0x0128 ++#define MAC_ADDRESS2_HIGH 0x012C ++#define MAC_ADDRESS2_MED 0x0130 ++#define MAC_ADDRESS2_LOW 0x0134 ++#define MAC_ADDRESS3_HIGH 0x0138 ++#define MAC_ADDRESS3_MED 0x013C ++#define MAC_ADDRESS3_LOW 0x0140 ++#define MAC_ADDRESS4_HIGH 0x0144 ++#define MAC_ADDRESS4_MED 0x0148 ++#define MAC_ADDRESS4_LOW 0x014C ++#define MAC_MULTICAST_HASH_TABLE1 0x0150 ++#define MAC_MULTICAST_HASH_TABLE2 0x0154 ++#define MAC_MULTICAST_HASH_TABLE3 0x0158 ++#define MAC_MULTICAST_HASH_TABLE4 0x015C ++#define MAC_FC_CONTROL 0x0160 ++#define MAC_FC_PAUSE_FRAME_GENERATE 0x0164 ++#define 
MAC_FC_SOURCE_ADDRESS_HIGH 0x0168 ++#define MAC_FC_SOURCE_ADDRESS_MED 0x016C ++#define MAC_FC_SOURCE_ADDRESS_LOW 0x0170 ++#define MAC_FC_DESTINATION_ADDRESS_HIGH 0x0174 ++#define MAC_FC_DESTINATION_ADDRESS_MED 0x0178 ++#define MAC_FC_DESTINATION_ADDRESS_LOW 0x017C ++#define MAC_FC_PAUSE_TIME_VALUE 0x0180 ++#define MAC_MDIO_CONTROL 0x01A0 ++#define MAC_MDIO_DATA 0x01A4 ++#define MAC_RX_STATCTR_CONTROL 0x01A8 ++#define MAC_RX_STATCTR_DATA_HIGH 0x01AC ++#define MAC_RX_STATCTR_DATA_LOW 0x01B0 ++#define MAC_TX_STATCTR_CONTROL 0x01B4 ++#define MAC_TX_STATCTR_DATA_HIGH 0x01B8 ++#define MAC_TX_STATCTR_DATA_LOW 0x01BC ++#define MAC_TRANSMIT_FIFO_ALMOST_FULL 0x01C0 ++#define MAC_TRANSMIT_PACKET_START_THRESHOLD 0x01C4 ++#define MAC_RECEIVE_PACKET_START_THRESHOLD 0x01C8 ++#define MAC_STATUS_IRQ 0x01E0 ++#define MAC_INTERRUPT_ENABLE 0x01E4 ++ ++/* DMA_CONFIGURATION (0x0000) register bit info ++ * 0-DMA controller in normal operation mode, ++ * 1-DMA controller reset to default state, ++ * clearing all internal state information ++ */ ++#define MREGBIT_SOFTWARE_RESET BIT(0) ++#define MREGBIT_BURST_1WORD BIT(1) ++#define MREGBIT_BURST_2WORD BIT(2) ++#define MREGBIT_BURST_4WORD BIT(3) ++#define MREGBIT_BURST_8WORD BIT(4) ++#define MREGBIT_BURST_16WORD BIT(5) ++#define MREGBIT_BURST_32WORD BIT(6) ++#define MREGBIT_BURST_64WORD BIT(7) ++#define MREGBIT_BURST_LENGTH GENMASK(7, 1) ++#define MREGBIT_DESCRIPTOR_SKIP_LENGTH GENMASK(12, 8) ++/* For Receive and Transmit DMA operate in Big-Endian mode for Descriptors. */ ++#define MREGBIT_DESCRIPTOR_BYTE_ORDERING BIT(13) ++#define MREGBIT_BIG_LITLE_ENDIAN BIT(14) ++#define MREGBIT_TX_RX_ARBITRATION BIT(15) ++#define MREGBIT_WAIT_FOR_DONE BIT(16) ++#define MREGBIT_STRICT_BURST BIT(17) ++#define MREGBIT_DMA_64BIT_MODE BIT(18) ++ ++/* DMA_CONTROL (0x0004) register bit info */ ++#define MREGBIT_START_STOP_TRANSMIT_DMA BIT(0) ++#define MREGBIT_START_STOP_RECEIVE_DMA BIT(1) ++ ++/* DMA_STATUS_IRQ (0x0008) register bit info */ ++#define MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ BIT(0) ++#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ BIT(1) ++#define MREGBIT_TRANSMIT_DMA_STOPPED_IRQ BIT(2) ++#define MREGBIT_RECEIVE_TRANSFER_DONE_IRQ BIT(4) ++#define MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ BIT(5) ++#define MREGBIT_RECEIVE_DMA_STOPPED_IRQ BIT(6) ++#define MREGBIT_RECEIVE_MISSED_FRAME_IRQ BIT(7) ++#define MREGBIT_MAC_IRQ BIT(8) ++#define MREGBIT_TRANSMIT_DMA_STATE GENMASK(18, 16) ++#define MREGBIT_RECEIVE_DMA_STATE GENMASK(23, 20) ++ ++/* DMA_INTERRUPT_ENABLE ( 0x000C) register bit info */ ++#define MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE BIT(0) ++#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_INTR_ENABLE BIT(1) ++#define MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE BIT(2) ++#define MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE BIT(4) ++#define MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE BIT(5) ++#define MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE BIT(6) ++#define MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE BIT(7) ++#define MREGBIT_MAC_INTR_ENABLE BIT(8) ++ ++/* DMA RECEIVE IRQ MITIGATION CONTROL */ ++#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MSK GENMASK(7, 0) ++#define MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_OFST (8) ++#define MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MSK GENMASK(27, 8) ++#define MRGEBIT_RECEIVE_IRQ_FRAME_COUNTER_MODE BIT(30) ++#define MRGEBIT_RECEIVE_IRQ_MITIGATION_ENABLE BIT(31) ++ ++/* MAC_GLOBAL_CONTROL (0x0100) register bit info */ ++#define MREGBIT_SPEED GENMASK(1, 0) ++#define MREGBIT_SPEED_10M 0x0 ++#define MREGBIT_SPEED_100M BIT(0) ++#define MREGBIT_SPEED_1000M BIT(1) ++#define 
MREGBIT_FULL_DUPLEX_MODE BIT(2) ++#define MREGBIT_RESET_RX_STAT_COUNTERS BIT(3) ++#define MREGBIT_RESET_TX_STAT_COUNTERS BIT(4) ++#define MREGBIT_UNICAST_WAKEUP_MODE BIT(8) ++#define MREGBIT_MAGIC_PACKET_WAKEUP_MODE BIT(9) ++ ++/* MAC_TRANSMIT_CONTROL (0x0104) register bit info */ ++#define MREGBIT_TRANSMIT_ENABLE BIT(0) ++#define MREGBIT_INVERT_FCS BIT(1) ++#define MREGBIT_DISABLE_FCS_INSERT BIT(2) ++#define MREGBIT_TRANSMIT_AUTO_RETRY BIT(3) ++#define MREGBIT_IFG_LEN GENMASK(6, 4) ++#define MREGBIT_PREAMBLE_LENGTH GENMASK(9, 7) ++ ++/* MAC_RECEIVE_CONTROL (0x0108) register bit info */ ++#define MREGBIT_RECEIVE_ENABLE BIT(0) ++#define MREGBIT_DISABLE_FCS_CHECK BIT(1) ++#define MREGBIT_STRIP_FCS BIT(2) ++#define MREGBIT_STORE_FORWARD BIT(3) ++#define MREGBIT_STATUS_FIRST BIT(4) ++#define MREGBIT_PASS_BAD_FRAMES BIT(5) ++#define MREGBIT_ACOOUNT_VLAN BIT(6) ++ ++/* MAC_MAXIMUM_FRAME_SIZE (0x010C) register bit info */ ++#define MREGBIT_MAX_FRAME_SIZE GENMASK(13, 0) ++ ++/* MAC_TRANSMIT_JABBER_SIZE (0x0110) register bit info */ ++#define MREGBIT_TRANSMIT_JABBER_SIZE GENMASK(15, 0) ++ ++/* MAC_RECEIVE_JABBER_SIZE (0x0114) register bit info */ ++#define MREGBIT_RECEIVE_JABBER_SIZE GENMASK(15, 0) ++ ++/* MAC_ADDRESS_CONTROL (0x0118) register bit info */ ++#define MREGBIT_MAC_ADDRESS1_ENABLE BIT(0) ++#define MREGBIT_MAC_ADDRESS2_ENABLE BIT(1) ++#define MREGBIT_MAC_ADDRESS3_ENABLE BIT(2) ++#define MREGBIT_MAC_ADDRESS4_ENABLE BIT(3) ++#define MREGBIT_INVERSE_MAC_ADDRESS1_ENABLE BIT(4) ++#define MREGBIT_INVERSE_MAC_ADDRESS2_ENABLE BIT(5) ++#define MREGBIT_INVERSE_MAC_ADDRESS3_ENABLE BIT(6) ++#define MREGBIT_INVERSE_MAC_ADDRESS4_ENABLE BIT(7) ++#define MREGBIT_PROMISCUOUS_MODE BIT(8) ++ ++/* MAC_ADDRESSx_HIGH (0x0120) register bit info */ ++#define MREGBIT_MAC_ADDRESS1_01_BYTE GENMASK(7, 0) ++#define MREGBIT_MAC_ADDRESS1_02_BYTE GENMASK(15, 8) ++/* MAC_ADDRESSx_MED (0x0124) register bit info */ ++#define MREGBIT_MAC_ADDRESS1_03_BYTE GENMASK(7, 0) ++#define MREGBIT_MAC_ADDRESS1_04_BYTE GENMASK(15, 8) ++/* MAC_ADDRESSx_LOW (0x0128) register bit info */ ++#define MREGBIT_MAC_ADDRESS1_05_BYTE GENMASK(7, 0) ++#define MREGBIT_MAC_ADDRESS1_06_BYTE GENMASK(15, 8) ++ ++/* MAC_FC_CONTROL (0x0160) register bit info */ ++#define MREGBIT_FC_DECODE_ENABLE BIT(0) ++#define MREGBIT_FC_GENERATION_ENABLE BIT(1) ++#define MREGBIT_AUTO_FC_GENERATION_ENABLE BIT(2) ++#define MREGBIT_MULTICAST_MODE BIT(3) ++#define MREGBIT_BLOCK_PAUSE_FRAMES BIT(4) ++ ++/* MAC_FC_PAUSE_FRAME_GENERATE (0x0164) register bit info */ ++#define MREGBIT_GENERATE_PAUSE_FRAME BIT(0) ++ ++/* MAC_FC_SRC/DST_ADDRESS_HIGH (0x0168) register bit info */ ++#define MREGBIT_MAC_ADDRESS_01_BYTE GENMASK(7, 0) ++#define MREGBIT_MAC_ADDRESS_02_BYTE GENMASK(15, 8) ++/* MAC_FC_SRC/DST_ADDRESS_MED (0x016C) register bit info */ ++#define MREGBIT_MAC_ADDRESS_03_BYTE GENMASK(7, 0) ++#define MREGBIT_MAC_ADDRESS_04_BYTE GENMASK(15, 8) ++/* MAC_FC_SRC/DSTD_ADDRESS_LOW (0x0170) register bit info */ ++#define MREGBIT_MAC_ADDRESS_05_BYTE GENMASK(7, 0) ++#define MREGBIT_MAC_ADDRESS_06_BYTE GENMASK(15, 8) ++ ++/* MAC_FC_PAUSE_TIME_VALUE (0x0180) register bit info */ ++#define MREGBIT_MAC_FC_PAUSE_TIME GENMASK(15, 0) ++ ++/* MAC_MDIO_CONTROL (0x01A0) register bit info */ ++#define MREGBIT_PHY_ADDRESS GENMASK(4, 0) ++#define MREGBIT_REGISTER_ADDRESS GENMASK(9, 5) ++#define MREGBIT_MDIO_READ_WRITE BIT(10) ++#define MREGBIT_START_MDIO_TRANS BIT(15) ++ ++/* MAC_MDIO_DATA (0x01A4) register bit info */ ++#define MREGBIT_MDIO_DATA GENMASK(15, 0) ++ ++/* MAC_RX_STATCTR_CONTROL 
(0x01A8) register bit info */ ++#define MREGBIT_RX_COUNTER_NUMBER GENMASK(4, 0) ++#define MREGBIT_START_RX_COUNTER_READ BIT(15) ++ ++/* MAC_RX_STATCTR_DATA_HIGH (0x01AC) register bit info */ ++#define MREGBIT_RX_STATCTR_DATA_HIGH GENMASK(15, 0) ++/* MAC_RX_STATCTR_DATA_LOW (0x01B0) register bit info */ ++#define MREGBIT_RX_STATCTR_DATA_LOW GENMASK(15, 0) ++ ++/* MAC_TX_STATCTR_CONTROL (0x01B4) register bit info */ ++#define MREGBIT_TX_COUNTER_NUMBER GENMASK(4, 0) ++#define MREGBIT_START_TX_COUNTER_READ BIT(15) ++ ++/* MAC_TX_STATCTR_DATA_HIGH (0x01B8) register bit info */ ++#define MREGBIT_TX_STATCTR_DATA_HIGH GENMASK(15, 0) ++/* MAC_TX_STATCTR_DATA_LOW (0x01BC) register bit info */ ++#define MREGBIT_TX_STATCTR_DATA_LOW GENMASK(15, 0) ++ ++/* MAC_TRANSMIT_FIFO_ALMOST_FULL (0x01C0) register bit info */ ++#define MREGBIT_TX_FIFO_AF GENMASK(13, 0) ++ ++/* MAC_TRANSMIT_PACKET_START_THRESHOLD (0x01C4) register bit info */ ++#define MREGBIT_TX_PACKET_START_THRESHOLD GENMASK(13, 0) ++ ++/* MAC_RECEIVE_PACKET_START_THRESHOLD (0x01C8) register bit info */ ++#define MREGBIT_RX_PACKET_START_THRESHOLD GENMASK(13, 0) ++ ++/* MAC_STATUS_IRQ (0x01E0) register bit info */ ++#define MREGBIT_MAC_UNDERRUN_IRQ BIT(0) ++#define MREGBIT_MAC_JABBER_IRQ BIT(1) ++ ++/* MAC_INTERRUPT_ENABLE (0x01E4) register bit info */ ++#define MREGBIT_MAC_UNDERRUN_INTERRUPT_ENABLE BIT(0) ++#define MREGBIT_JABBER_INTERRUPT_ENABLE BIT(1) ++ ++/* Receive Descriptors */ ++/* MAC_RECEIVE_DESCRIPTOR0 () register bit info */ ++#define MREGBIT_FRAME_LENGTH GENMASK(13, 0) ++#define MREGBIT_APPLICATION_STATUS GENMASK(28, 14) ++#define MREGBIT_LAST_DESCRIPTOR BIT(29) ++#define MREGBIT_FIRST_DESCRIPTOR BIT(30) ++#define MREGBIT_OWN_BIT BIT(31) ++ ++/* MAC_RECEIVE_DESCRIPTOR1 () register bit info */ ++#define MREGBIT_BUFFER1_SIZE GENMASK(11, 0) ++#define MREGBIT_BUFFER2_SIZE GENMASK(23, 12) ++#define MREGBIT_SECOND_ADDRESS_CHAINED BIT(25) ++#define MREGBIT_END_OF_RING BIT(26) ++ ++/* MAC_RECEIVE_DESCRIPTOR2 () register bit info */ ++#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0) ++ ++/* MAC_RECEIVE_DESCRIPTOR3 () register bit info */ ++#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0) ++ ++/* Transmit Descriptors */ ++/* TD_TRANSMIT_DESCRIPTOR0 () register bit info */ ++#define MREGBIT_TX_PACKET_STATUS GENMASK(29, 0) ++#define MREGBIT_OWN_BIT BIT(31) ++ ++/* TD_TRANSMIT_DESCRIPTOR1 () register bit info */ ++#define MREGBIT_BUFFER1_SIZE GENMASK(11, 0) ++#define MREGBIT_BUFFER2_SIZE GENMASK(23, 12) ++#define MREGBIT_FORCE_EOP_ERROR BIT(24) ++#define MREGBIT_SECOND_ADDRESS_CHAINED BIT(25) ++#define MREGBIT_END_OF_RING BIT(26) ++#define MREGBIT_DISABLE_PADDING BIT(27) ++#define MREGBIT_ADD_CRC_DISABLE BIT(28) ++#define MREGBIT_FIRST_SEGMENT BIT(29) ++#define MREGBIT_LAST_SEGMENT BIT(30) ++#define MREGBIT_INTERRUPT_ON_COMPLETION BIT(31) ++ ++/* TD_TRANSMIT_DESCRIPTOR2 () register bit info */ ++#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0) ++ ++/* TD_TRANSMIT_DESCRIPTOR3 () register bit info */ ++#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0) ++ ++/* RX frame status */ ++#define EMAC_RX_FRAME_ALIGN_ERR BIT(0) ++#define EMAC_RX_FRAME_RUNT BIT(1) ++#define EMAC_RX_FRAME_ETHERNET_TYPE BIT(2) ++#define EMAC_RX_FRAME_VLAN BIT(3) ++#define EMAC_RX_FRAME_MULTICAST BIT(4) ++#define EMAC_RX_FRAME_BROADCAST BIT(5) ++#define EMAC_RX_FRAME_CRC_ERR BIT(6) ++#define EMAC_RX_FRAME_MAX_LEN_ERR BIT(7) ++#define EMAC_RX_FRAME_JABBER_ERR BIT(8) ++#define EMAC_RX_FRAME_LENGTH_ERR BIT(9) ++#define EMAC_RX_FRAME_MAC_ADDR1_MATCH BIT(10) ++#define 
EMAC_RX_FRAME_MAC_ADDR2_MATCH BIT(11) ++#define EMAC_RX_FRAME_MAC_ADDR3_MATCH BIT(12) ++#define EMAC_RX_FRAME_MAC_ADDR4_MATCH BIT(13) ++#define EMAC_RX_FRAME_PAUSE_CTRL BIT(14) ++ ++/* emac ptp 1588 register */ ++#define PTP_1588_CTRL (0x300) ++#define TX_TIMESTAMP_EN BIT(1) ++#define RX_TIMESTAMP_EN BIT(2) ++#define RX_PTP_PKT_TYPE_OFST 3 ++#define RX_PTP_PKT_TYPE_MSK GENMASK(5, 3) ++ ++#define PTP_INRC_ATTR (0x304) ++#define INRC_VAL_MSK GENMASK(23, 0) ++#define INCR_PERIOD_OFST 24 ++#define INCR_PERIOD_MSK GENMASK(31, 24) ++ ++#define PTP_ETH_TYPE (0x308) ++#define PTP_ETH_TYPE_MSK GENMASK(15, 0) ++ ++#define PTP_MSG_ID (0x30c) ++ ++#define PTP_UDP_PORT (0x310) ++#define PTP_UDP_PORT_MSK GENMASK(15, 0) ++ ++/* read current system time from controller */ ++#define SYS_TIME_GET_LOW (0x320) ++#define SYS_TIME_GET_HI (0x324) ++ ++#define SYS_TIME_ADJ_LOW (0x328) ++#define SYS_TIME_LOW_MSK GENMASK(31, 0) ++#define SYS_TIME_ADJ_HI (0x32c) ++#define SYS_TIME_IS_NEG BIT(31) ++ ++#define TX_TIMESTAMP_LOW (0x330) ++#define TX_TIMESTAMP_HI (0x334) ++ ++#define RX_TIMESTAMP_LOW (0x340) ++#define RX_TIMESTAMP_HI (0x344) ++ ++#define RX_PTP_PKT_ATTR_LOW (0x348) ++#define PTP_SEQ_ID_MSK GENMASK(15, 0) ++#define PTP_SRC_ID_LOW_OFST 16 ++#define PTP_SRC_ID_LOW_MSK GENMASK(31, 16) ++ ++#define RX_PTP_PKT_ATTR_MID (0x34c) ++#define PTP_SRC_ID_MID_MSK GENMASK(31, 0) ++ ++#define RX_PTP_PKT_ATTR_HI (0x350) ++#define PTP_SRC_ID_HI_MSK GENMASK(31, 0) ++ ++#define PTP_1588_IRQ_STS (0x360) ++#define PTP_1588_IRQ_EN (0x364) ++#define PTP_TX_TIMESTAMP BIT(0) ++#define PTP_RX_TIMESTAMP BIT(1) ++ ++#define EMAC_DEFAULT_BUFSIZE 1536 ++#define EMAC_RX_BUF_2K 2048 ++#define EMAC_RX_BUF_4K 4096 ++ ++#define MAX_DATA_PWR_TX_DES 11 ++#define MAX_DATA_LEN_TX_DES 2048 ++ ++#define MAX_TX_STATS_NUM 12 ++#define MAX_RX_STATS_NUM 25 ++ ++/* The sizes (in bytes) of a ethernet packet */ ++#define ETHERNET_HEADER_SIZE 14 ++#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 ++#define MINIMUM_ETHERNET_FRAME_SIZE 64 ++#define ETHERNET_FCS_SIZE 4 ++#define MAXIMUM_ETHERNET_PACKET_SIZE (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) ++#define MINIMUM_ETHERNET_PACKET_SIZE (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) ++ ++#define CRC_LENGTH ETHERNET_FCS_SIZE ++#define MAX_JUMBO_FRAME_SIZE 0x3F00 ++ ++#define TX_STORE_FORWARD_MODE 0x5EE ++ ++#define EMAC_TX_FRAMES 64 ++/* 40ms */ ++#define EMAC_TX_COAL_TIMEOUT 40000 ++ ++#define DEFAULT_RX_COAL_FRAMES 64 ++ ++/* axi clk 312M, 1us = 312 cycle, ++ * every packet almost take 12us when operate at 1000Mbps ++ * so we set 10 packet delay time which 120us as rx coal timeout ++ */ ++#define DEFAULT_RX_COAL_TIMEOUT 120 ++ ++#define AXI_CLK_CYCLES_PER_US 312 ++ ++#define MAX_RX_COAL_FRAMES 255 ++ ++#define MIN_RX_COAL_FRAMES 1 ++ ++#define MAX_RX_COAL_TIMEOUT 3000 ++ ++#define MIN_RX_COAL_TIMEOUT 24 ++ ++/* only works for sizes that are powers of 2 */ ++#define EMAC_ROUNDUP(i, size) ({ \ ++ u32 __val = (i); \ ++ u32 __align = (size); \ ++ __val = (__val + __align - 1) & ~(__align - 1); \ ++ __val; \ ++}) ++ ++/* number of descriptors are required for len */ ++#define EMAC_TXD_COUNT(S, X) (((S) >> (X)) + 1) ++ ++/* calculate the number of descriptors unused */ ++#define EMAC_DESC_UNUSED(R) ({ \ ++ typeof(R) __r = (R); \ ++ ((__r->nxt_clean > __r->nxt_use) ? 
0 : __r->total_cnt) + \ ++ __r->nxt_clean - __r->nxt_use - 1; \ ++}) ++ ++enum rx_frame_status { ++ FRAME_OK = 0, ++ FRAME_DISCARD, ++ FRAME_MAX, ++}; ++ ++enum rx_ptp_type { ++ PTP_V2_L2_ONLY = 0x0, ++ PTP_V1_L4_ONLY = 0x1, ++ PTP_V2_L2_L4 = 0x2, ++}; ++ ++enum ptp_event_msg_id { ++ MSG_SYNC = 0x00, ++ MSG_DELAY_REQ = 0x01, ++ MSG_PDELAY_REQ = 0x02, ++ MSG_PDELAY_RESP = 0x03, ++ ALL_EVENTS = 0x03020100, ++}; ++ ++enum emac_state { ++ EMAC_DOWN, ++ EMAC_RESET_REQUESTED, ++ EMAC_RESETING, ++ EMAC_TASK_SCHED, ++ EMAC_STATE_MAX, ++}; ++ ++/* Receive Descriptor structure */ ++struct emac_rx_desc { ++ u32 frm_packet_len:14; ++ u32 apllication_status:15; ++ u32 last_descriptor:1; ++ u32 first_descriptor:1; ++ u32 own:1; ++ ++ u32 buf_size1:12; ++ u32 buf_size2:12; ++ u32 reserv1:1; ++ u32 sencond_addr_valid:1; ++ u32 end_ring:1; ++ u32 reserv2:3; ++ u32 rx_timestamp:1; ++ u32 ptp_pkt:1; ++ ++ u32 buf_addr1; ++ u32 buf_addr2; ++}; ++ ++/* Transmit Descriptor */ ++struct emac_tx_desc { ++ u32 frm_packet_staus:30; ++ u32 tx_timestamp:1; ++ u32 own:1; ++ ++ u32 buf_size1:12; ++ u32 buf_size2:12; ++ u32 force_eop_err:1; ++ u32 sencond_addr_valid:1; ++ u32 end_ring:1; ++ u32 disabled_padding:1; ++ u32 add_crc_disable:1; ++ u32 first_segment:1; ++ u32 last_segment:1; ++ u32 int_on_complet:1; ++ ++ u32 buf_addr1; ++ u32 buf_addr2; ++}; ++ ++struct desc_buf { ++ u64 dma_addr; ++ void *buff_addr; ++ u16 dma_len; ++ u8 map_as_page; ++}; ++ ++/* Descriptor buffer structure */ ++struct emac_tx_desc_buffer { ++ struct sk_buff *skb; ++ struct desc_buf buf[2]; ++ u8 timestamped; ++}; ++ ++/* Descriptor buffer structure */ ++struct emac_desc_buffer { ++ struct sk_buff *skb; ++ u64 dma_addr; ++ void *buff_addr; ++ unsigned long time_stamp; ++ u16 dma_len; ++ u8 map_as_page; ++ u8 timestamped; ++}; ++ ++/* Descriptor ring structure */ ++struct emac_desc_ring { ++ /* virtual memory address to the descriptor ring memory */ ++ void *desc_addr; ++ /* physical address of the descriptor ring */ ++ dma_addr_t desc_dma_addr; ++ /* length of descriptor ring in bytes */ ++ u32 total_size; ++ /* number of descriptors in the ring */ ++ u32 total_cnt; ++ /* next descriptor to associate a buffer with */ ++ u32 head; ++ /* next descriptor to check for DD status bit */ ++ u32 tail; ++ /* array of buffer information structs */ ++ union { ++ struct emac_desc_buffer *desc_buf; ++ struct emac_tx_desc_buffer *tx_desc_buf; ++ }; ++}; ++ ++struct emac_hw_stats { ++ u32 tx_ok_pkts; ++ u32 tx_total_pkts; ++ u32 tx_ok_bytes; ++ u32 tx_err_pkts; ++ u32 tx_singleclsn_pkts; ++ u32 tx_multiclsn_pkts; ++ u32 tx_lateclsn_pkts; ++ u32 tx_excessclsn_pkts; ++ u32 tx_unicast_pkts; ++ u32 tx_multicast_pkts; ++ u32 tx_broadcast_pkts; ++ u32 tx_pause_pkts; ++ u32 rx_ok_pkts; ++ u32 rx_total_pkts; ++ u32 rx_crc_err_pkts; ++ u32 rx_align_err_pkts; ++ u32 rx_err_total_pkts; ++ u32 rx_ok_bytes; ++ u32 rx_total_bytes; ++ u32 rx_unicast_pkts; ++ u32 rx_multicast_pkts; ++ u32 rx_broadcast_pkts; ++ u32 rx_pause_pkts; ++ u32 rx_len_err_pkts; ++ u32 rx_len_undersize_pkts; ++ u32 rx_len_oversize_pkts; ++ u32 rx_len_fragment_pkts; ++ u32 rx_len_jabber_pkts; ++ u32 rx_64_pkts; ++ u32 rx_65_127_pkts; ++ u32 rx_128_255_pkts; ++ u32 rx_256_511_pkts; ++ u32 rx_512_1023_pkts; ++ u32 rx_1024_1518_pkts; ++ u32 rx_1519_plus_pkts; ++ u32 rx_drp_fifo_full_pkts; ++ u32 rx_truncate_fifo_full_pkts; ++ ++ /* protect hardware stat operation */ ++ spinlock_t stats_lock; ++}; ++ ++struct emac_priv; ++struct emac_hw_ptp { ++ void (*config_hw_tstamping)(struct emac_priv *priv, u32 
enable, u8 rx_ptp_type, ++ u32 ptp_msg_id); ++ u32 (*config_systime_increment)(struct emac_priv *priv, u32 ptp_clock, u32 adj_clock); ++ int (*init_systime)(struct emac_priv *priv, u64 set_ns); ++ u64 (*get_phc_time)(struct emac_priv *priv); ++ u64 (*get_tx_timestamp)(struct emac_priv *priv); ++ u64 (*get_rx_timestamp)(struct emac_priv *priv); ++}; ++ ++struct emac_priv { ++ u32 dma_buf_sz; ++ u32 wol; ++ struct work_struct tx_timeout_task; ++ struct emac_desc_ring tx_ring; ++ struct emac_desc_ring rx_ring; ++ struct net_device *ndev; ++ struct napi_struct napi; ++ struct platform_device *pdev; ++ struct clk *mac_clk; ++ struct clk *phy_clk; ++ struct clk *ptp_clk; ++ struct reset_control *reset; ++ void __iomem *iobase; ++ u32 apmu_base; ++ int irq; ++ int link; ++ int duplex; ++ int speed; ++ phy_interface_t phy_interface; ++ struct mii_bus *mii; ++ struct phy_device *phy; ++ struct emac_hw_stats *hw_stats; ++ u8 tx_clk_phase; ++ u8 rx_clk_phase; ++ u8 clk_tuning_way; ++ bool clk_tuning_enable; ++ unsigned long state; ++ u32 tx_threshold; ++ u32 rx_threshold; ++ u32 tx_ring_num; ++ u32 rx_ring_num; ++ u32 dma_burst_len; ++ u32 ref_clk_frm_soc; ++ void __iomem *ctrl_reg; ++ void __iomem *dline_reg; ++ s32 lpm_qos; ++ u32 tx_count_frames; ++ u32 tx_coal_frames; ++ u32 tx_coal_timeout; ++ u32 rx_coal_frames; ++ u32 rx_coal_timeout; ++ struct timer_list txtimer; ++ struct ptp_clock *ptp_clock; ++ struct ptp_clock_info ptp_clock_ops; ++ int ptp_support; ++ u32 ptp_clk_rate; ++ int hwts_tx_en; ++ int hwts_rx_en; ++ struct emac_hw_ptp *hwptp; ++ struct delayed_work systim_overflow_work; ++ struct cyclecounter cc; ++ struct timecounter tc; ++ ++ /*spinlock for ptp counter operatioin */ ++ spinlock_t ptp_lock; ++}; ++ ++static inline void emac_wr(struct emac_priv *priv, u32 reg, u32 val) ++{ ++ writel(val, (priv->iobase + reg)); ++} ++ ++static inline int emac_rd(struct emac_priv *priv, u32 reg) ++{ ++ return readl(priv->iobase + reg); ++} ++ ++#endif /* _K1X_EMAC_H_ */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig +index 92d7d5a00b84..c47253cda6f3 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig ++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig +@@ -216,6 +216,24 @@ config DWMAC_SUN8I + stmmac device driver. This driver is used for H3/A83T/A64 + EMAC ethernet controller. + ++config DWMAC_XUANTIE ++ tristate "XuanTie dwmac support" ++ depends on OF && (ARCH_XUANTIE || COMPILE_TEST) ++ select MFD_SYSCON ++ help ++ Support for ethernet controllers on XuanTie RISC-V SoCs ++ ++ This selects the XuanTie platform specific glue layer support for ++ the stmmac device driver. This driver is used for XuanTie TH1520 ++ ethernet controller. ++ ++config DWMAC_SOPHGO ++ tristate "SOPHGO SG2042 GMAC support" ++ default ARCH_SOPHGO ++ depends on OF && (ARCH_SOPHGO || COMPILE_TEST) ++ help ++ BM-ethernet for sg2042 ++ + config DWMAC_IMX8 + tristate "NXP IMX8 DWMAC support" + default ARCH_MXC +@@ -255,6 +273,17 @@ config DWMAC_VISCONTI + help + Support for ethernet controller on Visconti SoCs. + ++config DWMAC_ULTRARISC ++ tristate "UltraRISC DP1000 DWMAC support" ++ default ARCH_ULTRARISC ++ depends on OF ++ help ++ Support for ethernet controller on DP1000 SoCs. ++ ++ This selects the UltraRISC platform specific glue layer support for ++ the stmmac device driver. This driver is used for UltraRISC DP1000 ++ ethernet controller. 
++ + endif + + config DWMAC_INTEL +diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile +index 5b57aee19267..e8be667cd81a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/Makefile ++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile +@@ -22,17 +22,20 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o + obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o + obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o + obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o ++obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o + obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o + obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o + obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o + obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o + obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o ++obj-$(CONFIG_DWMAC_XUANTIE) += dwmac-xuantie.o + obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o + obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o + obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o + obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o + obj-$(CONFIG_DWMAC_TEGRA) += dwmac-tegra.o + obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o ++obj-$(CONFIG_DWMAC_ULTRARISC) += dwmac-ultrarisc.o + stmmac-platform-objs:= stmmac_platform.o + dwmac-altr-socfpga-objs := dwmac-socfpga.o + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c +new file mode 100644 +index 000000000000..50a76c8f0df6 +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c +@@ -0,0 +1,268 @@ ++/* ++ * DWMAC specific glue layer ++ * ++ * Copyright (c) 2018 Bitmain Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "stmmac_platform.h" ++ ++struct bm_mac { ++ struct device *dev; ++ struct reset_control *rst; ++ struct clk *clk_tx; ++ struct clk *gate_clk_tx; ++ struct clk *gate_clk_ref; ++ struct gpio_desc *reset; ++}; ++ ++static u64 bm_dma_mask = DMA_BIT_MASK(40); ++ ++static int bm_eth_reset_phy(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ int phy_reset_gpio; ++ ++ if (!np) ++ return 0; ++ ++ phy_reset_gpio = of_get_named_gpio(np, "phy-reset-gpios", 0); ++ ++ if (phy_reset_gpio < 0) ++ return 0; ++ ++ if (gpio_request(phy_reset_gpio, "eth-phy-reset")) ++ return 0; ++ ++ /* RESET_PU */ ++ gpio_direction_output(phy_reset_gpio, 0); ++ mdelay(100); ++ ++ gpio_direction_output(phy_reset_gpio, 1); ++ /* RC charging time */ ++ mdelay(100); ++ ++ return 0; ++} ++ ++static void bm_mac_fix_speed(void *priv, unsigned int speed, unsigned int mode) ++{ ++ struct bm_mac *bsp_priv = priv; ++ unsigned long rate = 125000000; ++ bool needs_calibration = false; ++ int err; ++ ++ switch (speed) { ++ case SPEED_1000: ++ needs_calibration = true; ++ rate = 125000000; ++ break; ++ ++ case SPEED_100: ++ needs_calibration = true; ++ rate = 25000000; ++ break; ++ ++ case SPEED_10: ++ needs_calibration = true; ++ rate = 2500000; ++ break; ++ ++ default: ++ dev_err(bsp_priv->dev, "invalid speed %u\n", speed); ++ break; ++ } ++ ++ if (needs_calibration) { ++ err = clk_set_rate(bsp_priv->clk_tx, rate); ++ if (err < 0) ++ dev_err(bsp_priv->dev, "failed to set TX rate: %d\n" ++ , err); ++ } ++} ++ ++void bm_dwmac_exit(struct platform_device *pdev, void *priv) ++{ ++ struct bm_mac *bsp_priv = priv; ++ ++ clk_disable_unprepare(bsp_priv->gate_clk_tx); ++ clk_disable_unprepare(bsp_priv->gate_clk_ref); ++} ++ ++static int bm_validate_ucast_entries(struct device *dev, int ucast_entries) ++{ ++ int x = ucast_entries; ++ ++ switch (x) { ++ case 1 ... 
32: ++ case 64: ++ case 128: ++ break; ++ default: ++ x = 1; ++ dev_info(dev, "Unicast table entries set to unexpected value %d\n", ++ ucast_entries); ++ break; ++ } ++ return x; ++} ++ ++static int bm_validate_mcast_bins(struct device *dev, int mcast_bins) ++{ ++ int x = mcast_bins; ++ ++ switch (x) { ++ case HASH_TABLE_SIZE: ++ case 128: ++ case 256: ++ break; ++ default: ++ x = 0; ++ dev_info(dev, "Hash table entries set to unexpected value %d\n", ++ mcast_bins); ++ break; ++ } ++ return x; ++} ++ ++static void bm_dwmac_probe_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ ++ of_property_read_u32(np, "snps,multicast-filter-bins", &plat->multicast_filter_bins); ++ of_property_read_u32(np, "snps,perfect-filter-entries", &plat->unicast_filter_entries); ++ plat->unicast_filter_entries = bm_validate_ucast_entries(&pdev->dev, ++ plat->unicast_filter_entries); ++ plat->multicast_filter_bins = bm_validate_mcast_bins(&pdev->dev, ++ plat->multicast_filter_bins); ++ plat->flags |= (STMMAC_FLAG_TSO_EN); ++ plat->has_gmac4 = 1; ++ plat->has_gmac = 0; ++ plat->pmt = 0; ++} ++ ++static int bm_dwmac_probe(struct platform_device *pdev) ++{ ++ struct plat_stmmacenet_data *plat_dat; ++ struct stmmac_resources stmmac_res; ++ struct bm_mac *bsp_priv = NULL; ++ struct phy_device *phydev = NULL; ++ struct stmmac_priv *priv = NULL; ++ struct net_device *ndev = NULL; ++ int ret; ++ ++ pdev->dev.dma_mask = &bm_dma_mask; ++ pdev->dev.coherent_dma_mask = bm_dma_mask; ++ ++ bm_eth_reset_phy(pdev); ++ ++ ret = stmmac_get_platform_resources(pdev, &stmmac_res); ++ if (ret) ++ return ret; ++ ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); ++ if (IS_ERR(plat_dat)) ++ return PTR_ERR(plat_dat); ++ ++ bm_dwmac_probe_config_dt(pdev, plat_dat); ++ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ++ if (ret) ++ goto err_remove_config_dt; ++ ++ bsp_priv = devm_kzalloc(&pdev->dev, sizeof(*bsp_priv), GFP_KERNEL); ++ if (!bsp_priv) ++ return PTR_ERR(bsp_priv); ++ ++ bsp_priv->dev = &pdev->dev; ++ ++ /* clock setup */ ++ bsp_priv->clk_tx = devm_clk_get(&pdev->dev, ++ "clk_tx"); ++ if (IS_ERR(bsp_priv->clk_tx)) ++ dev_warn(&pdev->dev, "Cannot get mac tx clock!\n"); ++ else ++ plat_dat->fix_mac_speed = bm_mac_fix_speed; ++ ++ bsp_priv->gate_clk_tx = devm_clk_get(&pdev->dev, "gate_clk_tx"); ++ if (IS_ERR(bsp_priv->gate_clk_tx)) ++ dev_warn(&pdev->dev, "Cannot get mac tx gating clock!\n"); ++ else ++ clk_prepare_enable(bsp_priv->gate_clk_tx); ++ ++ bsp_priv->gate_clk_ref = devm_clk_get(&pdev->dev, "gate_clk_ref"); ++ if (IS_ERR(bsp_priv->gate_clk_ref)) ++ dev_warn(&pdev->dev, "Cannot get mac ref gating clock!\n"); ++ else ++ clk_prepare_enable(bsp_priv->gate_clk_ref); ++ ++ plat_dat->bsp_priv = bsp_priv; ++ plat_dat->exit = bm_dwmac_exit; ++ ++ ndev = dev_get_drvdata(&pdev->dev); ++ priv = netdev_priv(ndev); ++ phydev = mdiobus_get_phy(priv->mii, 0); ++ if (phydev == NULL) { ++ dev_err(&pdev->dev, "Can not get phy in addr 0\n"); ++ goto err_remove_config_dt; ++ } ++ ++ /* set green LED0 active for transmit, yellow LED1 for link*/ ++ ret = phy_write_paged(phydev, 0, 0x1f, 0xd04); ++ if (ret < 0) ++ dev_err(&pdev->dev, "Can not select page 0xd04\n"); ++ ret = phy_write_paged(phydev, 0xd04, 0x10, 0x617f); ++ if (ret < 0) ++ dev_err(&pdev->dev, "Can not alter LED Configuration\n"); ++ /* disable eee LED function */ ++ ret = phy_write_paged(phydev, 0xd04, 0x11, 0x0); ++ if (ret < 0) ++ dev_err(&pdev->dev, "Can not disable EEE 
Configuration\n"); ++ ret = phy_write_paged(phydev, 0, 0x1f, 0); ++ if (ret < 0) ++ dev_err(&pdev->dev, "Can not select page 0\n"); ++ ++ return 0; ++ ++err_remove_config_dt: ++ stmmac_remove_config_dt(pdev, plat_dat); ++ ++ return ret; ++} ++ ++static const struct of_device_id bm_dwmac_match[] = { ++ { .compatible = "bitmain,ethernet" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bm_dwmac_match); ++ ++static struct platform_driver bm_dwmac_driver = { ++ .probe = bm_dwmac_probe, ++ .remove_new = stmmac_pltfr_remove, ++ .driver = { ++ .name = "bm-dwmac", ++ .pm = &stmmac_pltfr_pm_ops, ++ .of_match_table = bm_dwmac_match, ++ }, ++}; ++module_platform_driver(bm_dwmac_driver); ++ ++MODULE_AUTHOR("Wei Huang"); ++MODULE_DESCRIPTION("Bitmain DWMAC specific glue layer"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ultrarisc.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ultrarisc.c +new file mode 100644 +index 000000000000..99918a54d2c3 +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ultrarisc.c +@@ -0,0 +1,69 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* UltraRISC DWMAC platform driver ++ * ++ * Copyright(C) 2025 UltraRISC Technology Co., Ltd. ++ * ++ * Author: wangjiahao ++ */ ++ ++#include ++#include "stmmac_platform.h" ++ ++static const struct of_device_id ultrarisc_eth_plat_match[] = { ++ { ++ .compatible = "ultrarisc,dp1000-gmac", ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, ultrarisc_eth_plat_match); ++ ++static int ultrarisc_eth_plat_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct plat_stmmacenet_data *plat_dat; ++ struct stmmac_resources stmmac_res; ++ int err; ++ ++ err = stmmac_get_platform_resources(pdev, &stmmac_res); ++ if (err) ++ return err; ++ ++ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); ++ if (IS_ERR(plat_dat)) { ++ dev_err(&pdev->dev, "dt configuration failed\n"); ++ return PTR_ERR(plat_dat); ++ } ++ ++ if (!is_of_node(dev->fwnode)) ++ goto err; ++ ++ err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ++ if (err) ++ return err; ++ ++ return 0; ++err: ++ stmmac_remove_config_dt(pdev, plat_dat); ++ return err; ++} ++ ++static int ultrarisc_eth_plat_remove(struct platform_device *pdev) ++{ ++ stmmac_dvr_remove(&pdev->dev); ++ return 0; ++} ++ ++static struct platform_driver ultrarisc_eth_plat_driver = { ++ .probe = ultrarisc_eth_plat_probe, ++ .remove = ultrarisc_eth_plat_remove, ++ .driver = { ++ .name = "ultrarisc-eth-plat", ++ .pm = &stmmac_pltfr_pm_ops, ++ .of_match_table = ultrarisc_eth_plat_match, ++ }, ++}; ++ ++module_platform_driver(ultrarisc_eth_plat_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("UltraRISC DWMAC platform driver"); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c +new file mode 100644 +index 000000000000..ac5b3b968bf6 +--- /dev/null ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c +@@ -0,0 +1,584 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * XuanTie DWMAC platform driver ++ * ++ * Copyright (C) 2021 Alibaba Group Holding Limited. 
++ * Copyright (C) 2023 Jisheng Zhang ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "stmmac_platform.h" ++ ++#define GMAC_CLK_EN 0x00 ++#define GMAC_TX_CLK_EN BIT(1) ++#define GMAC_TX_CLK_N_EN BIT(2) ++#define GMAC_TX_CLK_OUT_EN BIT(3) ++#define GMAC_RX_CLK_EN BIT(4) ++#define GMAC_RX_CLK_N_EN BIT(5) ++#define GMAC_EPHY_REF_CLK_EN BIT(6) ++#define GMAC_RXCLK_DELAY_CTRL 0x04 ++#define GMAC_RXCLK_BYPASS BIT(15) ++#define GMAC_RXCLK_INVERT BIT(14) ++#define GMAC_RXCLK_DELAY_MASK GENMASK(4, 0) ++#define GMAC_RXCLK_DELAY_VAL(x) FIELD_PREP(GMAC_RXCLK_DELAY_MASK, (x)) ++#define GMAC_TXCLK_DELAY_CTRL 0x08 ++#define GMAC_TXCLK_BYPASS BIT(15) ++#define GMAC_TXCLK_INVERT BIT(14) ++#define GMAC_TXCLK_DELAY_MASK GENMASK(4, 0) ++#define GMAC_TXCLK_DELAY_VAL(x) FIELD_PREP(GMAC_RXCLK_DELAY_MASK, (x)) ++#define GMAC_PLLCLK_DIV 0x0c ++#define GMAC_PLLCLK_DIV_EN BIT(31) ++#define GMAC_PLLCLK_DIV_MASK GENMASK(7, 0) ++#define GMAC_PLLCLK_DIV_NUM(x) FIELD_PREP(GMAC_PLLCLK_DIV_MASK, (x)) ++#define GMAC_CLK_PTP 0x14 ++#define GMAC_CLK_PTP_DIV_EN BIT(31) ++#define GMAC_CLK_PTP_DIV_MASK GENMASK(7, 0) ++#define GMAC_CLK_PTP_DIV_NUM(x) FIELD_PREP(GMAC_CLK_PTP_DIV_MASK, (x)) ++#define GMAC_GTXCLK_SEL 0x18 ++#define GMAC_GTXCLK_SEL_PLL BIT(0) ++#define GMAC_INTF_CTRL 0x1c ++#define PHY_INTF_MASK BIT(0) ++#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1) ++#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0) ++#define GMAC_TXCLK_OEN 0x20 ++#define TXCLK_DIR_MASK BIT(0) ++#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0) ++#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1) ++ ++#define GMAC_GMII_RGMII_RATE 125000000 ++#define GMAC_MII_RATE 25000000 ++#define GMAC_PTP_CLK_RATE 50000000 //50MHz ++ ++struct th1520_dwmac { ++ struct plat_stmmacenet_data *plat; ++ struct regmap *apb_regmap; ++ struct device *dev; ++ u32 rx_delay; ++ u32 tx_delay; ++ struct clk *gmac_axi_aclk; ++ struct clk *gmac_axi_pclk; ++}; ++ ++#define pm_debug dev_dbg /* for suspend/resume interface debug info */ ++ ++static int th1520_dwmac_set_phy_if(struct plat_stmmacenet_data *plat) ++{ ++ struct th1520_dwmac *dwmac = plat->bsp_priv; ++ u32 phyif; ++ ++ switch (plat->mac_interface) { ++ case PHY_INTERFACE_MODE_MII: ++ phyif = PHY_INTF_MII_GMII; ++ break; ++ case PHY_INTERFACE_MODE_RGMII: ++ case PHY_INTERFACE_MODE_RGMII_ID: ++ case PHY_INTERFACE_MODE_RGMII_TXID: ++ case PHY_INTERFACE_MODE_RGMII_RXID: ++ phyif = PHY_INTF_RGMII; ++ break; ++ default: ++ dev_err(dwmac->dev, "unsupported phy interface %d\n", ++ plat->mac_interface); ++ return -EINVAL; ++ }; ++ ++ regmap_write(dwmac->apb_regmap, GMAC_INTF_CTRL, phyif); ++ ++ return 0; ++} ++ ++static int th1520_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat) ++{ ++ struct th1520_dwmac *dwmac = plat->bsp_priv; ++ u32 txclk_dir; ++ ++ switch (plat->mac_interface) { ++ case PHY_INTERFACE_MODE_MII: ++ txclk_dir = TXCLK_DIR_INPUT; ++ break; ++ case PHY_INTERFACE_MODE_RGMII: ++ case PHY_INTERFACE_MODE_RGMII_ID: ++ case PHY_INTERFACE_MODE_RGMII_TXID: ++ case PHY_INTERFACE_MODE_RGMII_RXID: ++ txclk_dir = TXCLK_DIR_OUTPUT; ++ break; ++ default: ++ dev_err(dwmac->dev, "unsupported phy interface %d\n", ++ plat->mac_interface); ++ return -EINVAL; ++ }; ++ ++ regmap_write(dwmac->apb_regmap, GMAC_TXCLK_OEN, txclk_dir); ++ ++ return 0; ++} ++ ++static void th1520_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) ++{ ++ struct th1520_dwmac *dwmac = priv; ++ struct plat_stmmacenet_data *plat = dwmac->plat; 
++ unsigned long rate; ++ u32 div; ++ ++ switch (plat->mac_interface) { ++ /* For MII, rxc/txc is provided by phy */ ++ case PHY_INTERFACE_MODE_MII: ++ return; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: @@ -443350,6 +481452,86 @@ index 000000000000..5ab8469e3ec3 +MODULE_AUTHOR("Wei Liu "); +MODULE_DESCRIPTION("XuanTie TH1520 nvmem driver"); +MODULE_LICENSE("GPL"); +diff --git a/drivers/of/device.c b/drivers/of/device.c +index 873d933e8e6d..0681c220d114 100644 +--- a/drivers/of/device.c ++++ b/drivers/of/device.c +@@ -96,7 +96,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, + const struct bus_dma_region *map = NULL; + struct device_node *bus_np; + u64 dma_start = 0; +- u64 mask, end, size = 0; ++ u64 mask, end = 0; + bool coherent; + int iommu_ret; + int ret; +@@ -117,34 +117,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, + if (!force_dma) + return ret == -ENODEV ? 0 : ret; + } else { +- const struct bus_dma_region *r = map; +- u64 dma_end = 0; +- + /* Determine the overall bounds of all DMA regions */ +- for (dma_start = ~0; r->size; r++) { +- /* Take lower and upper limits */ +- if (r->dma_start < dma_start) +- dma_start = r->dma_start; +- if (r->dma_start + r->size > dma_end) +- dma_end = r->dma_start + r->size; +- } +- size = dma_end - dma_start; +- +- /* +- * Add a work around to treat the size as mask + 1 in case +- * it is defined in DT as a mask. +- */ +- if (size & 1) { +- dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n", +- size); +- size = size + 1; +- } +- +- if (!size) { +- dev_err(dev, "Adjusted size 0x%llx invalid\n", size); +- kfree(map); +- return -EINVAL; +- } ++ dma_start = dma_range_map_min(map); ++ end = dma_range_map_max(map); + } + + /* +@@ -158,16 +133,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, + dev->dma_mask = &dev->coherent_dma_mask; + } + +- if (!size && dev->coherent_dma_mask) +- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); +- else if (!size) +- size = 1ULL << 32; ++ if (!end && dev->coherent_dma_mask) ++ end = dev->coherent_dma_mask; ++ else if (!end) ++ end = (1ULL << 32) - 1; + + /* + * Limit coherent and dma mask based on size and default mask + * set by the driver. + */ +- end = dma_start + size - 1; + mask = DMA_BIT_MASK(ilog2(end) + 1); + dev->coherent_dma_mask &= mask; + *dev->dma_mask &= mask; +@@ -201,7 +175,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, + } else + dev_dbg(dev, "device is behind an iommu\n"); + +- arch_setup_dma_ops(dev, dma_start, size, coherent); ++ arch_setup_dma_ops(dev, dma_start, end - dma_start + 1, coherent); + + if (iommu_ret) + of_dma_set_restricted_buffer(dev, np); diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index 291d12711363..25c768d5afb4 100644 --- a/drivers/pci/controller/cadence/Kconfig @@ -443383,10 +481565,10 @@ index 9bac5fb2f13d..edac7c5e94a3 100644 +obj-$(CONFIG_PCIE_CADENCE_SOPHGO) += pcie-cadence-sophgo.o diff --git a/drivers/pci/controller/cadence/pcie-cadence-sophgo.c b/drivers/pci/controller/cadence/pcie-cadence-sophgo.c new file mode 100644 -index 000000000000..6f0a92c632b3 +index 000000000000..70a23b3563fb --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-sophgo.c -@@ -0,0 +1,973 @@ +@@ -0,0 +1,936 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Cadence +// Cadence PCIe host controller driver. 
@@ -443849,43 +482031,6 @@ index 000000000000..6f0a92c632b3 + .chip = &cdns_pcie_msi_irq_chip, +}; + -+struct vendor_id_list vendor_id_list[] = { -+ {"Inter X520", 0x8086, 0x10fb}, -+ {"Inter I40E", 0x8086, 0x1572}, -+ //{"WangXun RP1000", 0x8088}, -+ {"Switchtec", 0x11f8,0x4052}, -+ {"Mellanox ConnectX-2", 0x15b3, 0x6750} -+}; -+ -+size_t vendor_id_list_num = ARRAY_SIZE(vendor_id_list); -+ -+int check_vendor_id(struct pci_dev *dev, struct vendor_id_list vendor_id_list[], -+ size_t vendor_id_list_num) -+{ -+ uint16_t device_vendor_id; -+ uint16_t device_id; -+ -+ if (pci_read_config_word(dev, PCI_VENDOR_ID, &device_vendor_id) != 0) { -+ pr_err("Failed to read device vendor ID\n"); -+ return 0; -+ } -+ -+ if (pci_read_config_word(dev, PCI_DEVICE_ID, &device_id) != 0) { -+ pr_err("Failed to read device vendor ID\n"); -+ return 0; -+ } -+ -+ for (int i = 0; i < vendor_id_list_num; ++i) { -+ if (device_vendor_id == vendor_id_list[i].vendor_id && device_id == vendor_id_list[i].device_id) { -+ pr_info("dev: %s vendor ID: 0x%04x device ID: 0x%04x Enable MSI-X IRQ\n", -+ vendor_id_list[i].name, device_vendor_id, device_id); -+ return 1; -+ } -+ } -+ return 0; -+} -+ -+ +static int cdns_pcie_msi_setup_for_top_intc(struct cdns_mango_pcie_rc *rc, int intc_id) +{ + struct irq_domain *irq_parent = cdns_pcie_get_parent_irq_domain(intc_id); @@ -444362,156 +482507,2776 @@ index 000000000000..6f0a92c632b3 +builtin_platform_driver(cdns_pcie_host_driver); diff --git a/drivers/pci/controller/cadence/pcie-cadence-sophgo.h b/drivers/pci/controller/cadence/pcie-cadence-sophgo.h new file mode 100644 -index 000000000000..ef46c46678ed +index 000000000000..be36e164e8e4 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-sophgo.h -@@ -0,0 +1,17 @@ +@@ -0,0 +1,6 @@ +#ifndef PCIE_CADENCE_SOPHGO +#define PCIE_CADENCE_SOPHGO + ++extern struct irq_domain *cdns_pcie_get_parent_irq_domain(int intc_id); ++ ++#endif +diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig +index ab96da43e0c2..086266a8d62e 100644 +--- a/drivers/pci/controller/dwc/Kconfig ++++ b/drivers/pci/controller/dwc/Kconfig +@@ -14,6 +14,17 @@ config PCIE_DW_EP + bool + select PCIE_DW + ++config PCIE_DW_SOPHGO ++ bool "DesignWare Sophgo PCIe Host controller" ++ depends on OF || ACPI ++ select IRQ_DOMAIN ++ select PCIE_DW ++ help ++ Say Y here if you want to support the DesignWare PCIe controller in host mode ++ for Sophgo SoCs. this PCIe controller is from DesignWare, integrated into the ++ Sophgo SoCs. PCIe is one of subsystems, it is choisable, Don't be ++ care of this if it is not used in your systems. ++ + config PCIE_AL + bool "Amazon Annapurna Labs PCIe controller" + depends on OF && (ARM64 || COMPILE_TEST) +@@ -415,4 +426,19 @@ config PCIE_VISCONTI_HOST + Say Y here if you want PCIe controller support on Toshiba Visconti SoC. + This driver supports TMPV7708 SoC. + ++config PCIE_ULTRARISC ++ bool "UltraRISC PCIe host controller" ++ depends on ARCH_ULTRARISC || COMPILE_TEST ++ select PCIE_DW_HOST ++ select PCI_MSI ++ default y if ARCH_ULTRARISC ++ help ++ Enables support for the PCIe controller in the UltraRISC SoC. ++ This driver supports UR-DP1000 SoC. When selected, it automatically ++ enables both `PCIE_DW_HOST` and `PCI_MSI`, ensuring proper support ++ for MSI-based interrupt handling in the PCIe controller. ++ By default, this symbol is enabled when `ARCH_ULTRARISC` is active, ++ requiring no further configuration on that platform. 
++ ++ + endmenu +diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile +index bf5c311875a1..b8ce73d09446 100644 +--- a/drivers/pci/controller/dwc/Makefile ++++ b/drivers/pci/controller/dwc/Makefile +@@ -3,6 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o + obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o + obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o + obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o ++obj-$(CONFIG_PCIE_DW_SOPHGO) += pcie-dw-sophgo.o + obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o + obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o + obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o +@@ -26,6 +27,7 @@ obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o + obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o + obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o + obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o ++obj-$(CONFIG_PCIE_ULTRARISC) += pcie-ultrarisc.o + + # The following drivers are for devices that use the generic ACPI + # pci_root.c driver but don't support standard ECAM config access. +diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c +index a7170fd0e847..5699c374ce48 100644 +--- a/drivers/pci/controller/dwc/pcie-designware-host.c ++++ b/drivers/pci/controller/dwc/pcie-designware-host.c +@@ -45,6 +45,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = { + .irq_ack = dw_msi_ack_irq, + .irq_mask = dw_msi_mask_irq, + .irq_unmask = dw_msi_unmask_irq, ++#if defined CONFIG_SMP && defined CONFIG_PCIE_ULTRARISC ++ .irq_set_affinity = irq_chip_set_affinity_parent, ++#endif + }; + + static struct msi_domain_info dw_pcie_msi_domain_info = { +@@ -116,6 +119,34 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) + (int)d->hwirq, msg->address_hi, msg->address_lo); + } + ++/* ++ * Set affinity for DP1000 MSI interrupts ++ * This is used for platforms that require setting the affinity of MSI ++ * interrupts to a specific CPU. 
++ * ++ * @d: irq_data structure for the MSI interrupt ++ * @mask: cpumask to set the affinity to ++ * @force: if true, force the affinity to be set, even if it is already set ++ * ++ * Return: 0 on success, -EINVAL on failure ++ */ ++static int dw_pci_msi_set_affinity_dp1000(struct irq_data *d, ++ const struct cpumask *mask, bool force) ++{ ++ struct irq_domain *domain = d->domain; ++ struct dw_pcie_rp *pp = domain->host_data; ++ struct irq_desc *desc; ++ struct irq_data *data; ++ ++ desc = irq_to_desc(pp->msi_irq[0]); ++ data = &(desc->irq_data); ++ ++ if (data->chip->irq_set_affinity) ++ return data->chip->irq_set_affinity(data, mask, force); ++ ++ return -EINVAL; ++} ++ + static int dw_pci_msi_set_affinity(struct irq_data *d, + const struct cpumask *mask, bool force) + { +@@ -177,7 +208,11 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = { + .name = "DWPCI-MSI", + .irq_ack = dw_pci_bottom_ack, + .irq_compose_msi_msg = dw_pci_setup_msi_msg, ++#if defined CONFIG_PCIE_ULTRARISC ++ .irq_set_affinity = dw_pci_msi_set_affinity_dp1000, ++#else + .irq_set_affinity = dw_pci_msi_set_affinity, ++#endif + .irq_mask = dw_pci_bottom_mask, + .irq_unmask = dw_pci_bottom_unmask, + }; +@@ -640,10 +675,57 @@ EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus); + + static struct pci_ops dw_pcie_ops = { + .map_bus = dw_pcie_own_conf_map_bus, ++#if IS_ENABLED(CONFIG_PCIE_ULTRARISC) ++ .read = pci_generic_config_read32, ++ .write = pci_generic_config_write32, ++#else + .read = pci_generic_config_read, + .write = pci_generic_config_write, ++#endif + }; + ++/** ++ * dw_pcie_prog_outbound_atu_dp1000 - Program multiple outbound ATU windows of DP1000 ++ * @pci: PCIe controller instance ++ * @entry: resource entry to program ++ * @index: pointer to the current ATU index, updated on success ++ * ++ * This function programs multiple outbound ATU windows for a given resource ++ * entry, splitting it into smaller windows if necessary. ++ * ++ * Returns 0 on success, or a negative error code on failure. ++ */ ++static int dw_pcie_prog_outbound_atu_dp1000(struct dw_pcie *pci, ++ struct resource_entry *entry, ++ int *index) ++{ ++ resource_size_t res_start, res_size, window_size; ++ int i, ret; ++ ++ res_start = entry->res->start; ++ res_size = resource_size(entry->res); ++ ++ i = *index; ++ while (res_size > 0) { ++ window_size = res_size > (pci->region_limit + 1) ? 
++ (pci->region_limit + 1) : res_size; ++ ++ ret = dw_pcie_prog_outbound_atu(pci, i++, PCIE_ATU_TYPE_MEM, ++ res_start, ++ res_start - entry->offset, ++ window_size); ++ if (ret) ++ return ret; ++ ++ res_start += window_size; ++ res_size -= window_size; ++ } ++ ++ *index = i; ++ ++ return 0; ++} ++ + static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) + { + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +@@ -674,10 +756,14 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) + if (pci->num_ob_windows <= ++i) + break; + ++#if IS_ENABLED(CONFIG_PCIE_ULTRARISC) ++ ret = dw_pcie_prog_outbound_atu_dp1000(pci, entry, &i); ++#else + ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM, + entry->res->start, + entry->res->start - entry->offset, + resource_size(entry->res)); ++#endif + if (ret) { + dev_err(pci->dev, "Failed to set MEM range %pr\n", + entry->res); +diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h +index ef0b2efa9f93..f46ce5f0dabb 100644 +--- a/drivers/pci/controller/dwc/pcie-designware.h ++++ b/drivers/pci/controller/dwc/pcie-designware.h +@@ -69,6 +69,13 @@ + #define LINK_WAIT_MAX_IATU_RETRIES 5 + #define LINK_WAIT_IATU 9 + ++/* PCIE_PORT_FORCE: Port Force Link Register ++ * This register can be used for testing and debuggong the link. ++ * Bit[7:0] LINK_NUM: Link Number. Not used for endpoint. ++ */ ++#define PCIE_PORT_FORCE 0x708 ++#define PORT_LINK_NUM_MASK GENMASK(7, 0) ++ + /* Synopsys-specific PCIe configuration registers */ + #define PCIE_PORT_AFR 0x70C + #define PORT_AFR_N_FTS_MASK GENMASK(15, 8) +@@ -90,6 +97,27 @@ + #define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3) + #define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7) + #define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf) ++#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f) ++ ++/* ++ * PCIE_TIMER_CTRL_MAX_FUNC_NUM: Timer Control and Max Function Number Register. ++ * This register holds the ack frequency, latency, replay, fast link scaling timers, ++ * and max function number values. ++ * Bit[30:29] FAST_LINK_SCALING_FACTOR: Fast Link Timer Scaling Factor. ++ * 0x0 (SF_1024):Scaling Factor is 1024 (1ms is 1us). ++ * When the LTSSM is in Config or L12 Entry State, 1ms ++ * timer is 2us, 2ms timer is 4us and 3ms timer is 6us. ++ * 0x1 (SF_256): Scaling Factor is 256 (1ms is 4us) ++ * 0x2 (SF_64): Scaling Factor is 64 (1ms is 16us) ++ * 0x3 (SF_16): Scaling Factor is 16 (1ms is 64us) ++ */ ++#define PCIE_TIMER_CTRL_MAX_FUNC_NUM 0x718 ++#define PORT_FLT_SF_MASK GENMASK(30, 29) ++#define PORT_FLT_SF(n) FIELD_PREP(PORT_FLT_SF_MASK, n) ++#define PORT_FLT_SF_1024 PORT_FLT_SF(0x0) ++#define PORT_FLT_SF_256 PORT_FLT_SF(0x1) ++#define PORT_FLT_SF_64 PORT_FLT_SF(0x2) ++#define PORT_FLT_SF_16 PORT_FLT_SF(0x3) + + #define PCIE_PORT_DEBUG0 0x728 + #define PORT_LOGIC_LTSSM_STATE_MASK 0x1f +@@ -107,6 +135,7 @@ + #define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2) + #define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4) + #define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8) ++#define PORT_LOGIC_LINK_WIDTH_16_LANES PORT_LOGIC_LINK_WIDTH(0x10) + + #define PCIE_MSI_ADDR_LO 0x820 + #define PCIE_MSI_ADDR_HI 0x824 +@@ -124,6 +153,16 @@ + #define PCIE_PORT_MULTI_LANE_CTRL 0x8C0 + #define PORT_MLTI_UPCFG_SUPPORT BIT(7) + ++/* ++ * PCIE_GEN3_RELATED: Gen3 Control Register ++ * This register holds the Gen3 related configuration. ++ * Bit[16] GEN3_EQUALIZATION_DISABLE: Equalization Disable. ++ * Disable equalization feature. 
This bit cannot be changed ++ * once the LTSSM starts link training. ++ */ ++#define PCIE_GEN3_RELATED 0x890 ++#define PORT_GEN3_EQUALIZATION_DISABLE BIT(16) ++ + #define PCIE_VERSION_NUMBER 0x8F8 + #define PCIE_VERSION_TYPE 0x8FC + +diff --git a/drivers/pci/controller/dwc/pcie-dw-sophgo.c b/drivers/pci/controller/dwc/pcie-dw-sophgo.c +new file mode 100644 +index 000000000000..ecf2091f1eef +--- /dev/null ++++ b/drivers/pci/controller/dwc/pcie-dw-sophgo.c +@@ -0,0 +1,1687 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * PCIe host controller driver for Sophgo SoCs. ++ * ++ * Copyright (C) 2023 Sophgo Tech Co., Ltd. ++ * http://www.sophgo.com ++ * ++ * Author: Lionel Li ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../../pci.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pcie-dw-sophgo.h" ++ ++int sophgo_dw_pcie_probe(struct platform_device *pdev); ++ ++static void sophgo_dw_intx_mask(struct irq_data *data) ++{ ++ struct sophgo_dw_pcie *pcie = irq_data_get_irq_chip_data(data); ++ unsigned long flags = 0; ++ u32 val = 0; ++ ++ raw_spin_lock_irqsave(&pcie->pp.lock, flags); ++ val = sophgo_dw_pcie_read_ctrl(pcie, PCIE_CTRL_IRQ_EN_REG, 4); ++ val &= ~BIT(data->hwirq + PCIE_CTRL_IRQ_EN_INTX_SHIFT_BIT); ++ sophgo_dw_pcie_write_ctrl(pcie, PCIE_CTRL_IRQ_EN_REG, 4, val); ++ raw_spin_unlock_irqrestore(&pcie->pp.lock, flags); ++} ++ ++static void sophgo_dw_intx_unmask(struct irq_data *data) ++{ ++ struct sophgo_dw_pcie *pcie = irq_data_get_irq_chip_data(data); ++ unsigned long flags; ++ u32 val; + -+struct vendor_id_list { -+ const char *name; -+ uint16_t vendor_id; -+ uint16_t device_id; ++ raw_spin_lock_irqsave(&pcie->pp.lock, flags); ++ val = sophgo_dw_pcie_read_ctrl(pcie, PCIE_CTRL_IRQ_EN_REG, 4); ++ val |= BIT(data->hwirq + PCIE_CTRL_IRQ_EN_INTX_SHIFT_BIT); ++ sophgo_dw_pcie_write_ctrl(pcie, PCIE_CTRL_IRQ_EN_REG, 4, val); ++ raw_spin_unlock_irqrestore(&pcie->pp.lock, flags); ++} ++ ++/** ++ * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt ++ * @data: pointer to chip specific data ++ * ++ * As an emulated level IRQ, its interrupt status will remain ++ * until the corresponding de-assert message is received; hence that ++ * the status can only be cleared when the interrupt has been serviced. 
++ */ ++static void sophgo_dw_intx_eoi(struct irq_data *data) ++{ ++ ++} ++ ++static int sophgo_dw_pcie_set_affinity(struct irq_data *data, ++ const struct cpumask *mask, bool force) ++{ ++ return -EINVAL; ++} ++ ++static struct irq_chip sophgo_dw_intx_irq_chip = { ++ .irq_mask = sophgo_dw_intx_mask, ++ .irq_unmask = sophgo_dw_intx_unmask, ++ .irq_eoi = sophgo_dw_intx_eoi, ++ .irq_set_affinity = sophgo_dw_pcie_set_affinity, ++ .name = "sophgo-dw-intx", +}; + -+extern struct vendor_id_list vendor_id_list[]; -+extern size_t vendor_id_list_num; ++static int sophgo_pcie_intx_map(struct irq_domain *domain, unsigned int irq, ++ irq_hw_number_t hwirq) ++{ ++ irq_set_chip_data(irq, domain->host_data); ++ irq_set_chip_and_handler_name(irq, &sophgo_dw_intx_irq_chip, ++ handle_fasteoi_irq, "sophgo-dw-intx"); ++ return 0; ++} ++ ++static const struct irq_domain_ops intx_domain_ops = { ++ .map = sophgo_pcie_intx_map, ++}; ++ ++static int sophgo_dw_pcie_init_intx_domains(struct sophgo_dw_pcie *pcie) ++{ ++ struct device *dev = pcie->dev; ++ struct device_node *intc_node, *node = dev->of_node; ++ int ret = 0; ++ ++ /* Setup INTx */ ++ intc_node = of_get_child_by_name(node, "interrupt-controller"); ++ if (!intc_node) { ++ dev_err(dev, "missing interrupt-controller node\n"); ++ return -ENODEV; ++ } ++ ++ pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, ++ &intx_domain_ops, pcie); ++ if (!pcie->intx_domain) { ++ dev_err(dev, "failed to create INTx IRQ domain\n"); ++ ret = -ENODEV; ++ } ++ ++ of_node_put(intc_node); ++ ++ return ret; ++} ++ ++static void sophgo_dw_pcie_irq_handler(struct irq_desc *desc) ++{ ++ struct sophgo_dw_pcie *pcie = irq_desc_get_handler_data(desc); ++ struct irq_chip *irqchip = irq_desc_get_chip(desc); ++ unsigned long status; ++ irq_hw_number_t irq_bit = PCIE_CTRL_INT_SIG_0_PCIE_INTX_SHIFT_BIT; ++ ++ chained_irq_enter(irqchip, desc); ++ ++ status = sophgo_dw_pcie_read_ctrl(pcie, PCIE_CTRL_INT_SIG_0_REG, 4); ++ for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX + ++ PCIE_CTRL_INT_SIG_0_PCIE_INTX_SHIFT_BIT) ++ generic_handle_domain_irq(pcie->intx_domain, ++ irq_bit - PCIE_CTRL_INT_SIG_0_PCIE_INTX_SHIFT_BIT); ++ ++ chained_irq_exit(irqchip, desc); ++ ++} ++ ++static int sophgo_dw_pcie_setup_irq(struct sophgo_dw_pcie *pcie) ++{ ++ struct device *dev = pcie->dev; ++ struct platform_device *pdev = to_platform_device(dev); ++ int err; ++ ++ err = sophgo_dw_pcie_init_intx_domains(pcie); ++ if (err) ++ return err; ++ ++ pcie->irq = platform_get_irq_byname(pdev, "pcie_irq"); ++ pr_info("%s, irq = %d\n", __func__, pcie->irq); ++ if (pcie->irq < 0) ++ return pcie->irq; ++ ++ irq_set_chained_handler_and_data(pcie->irq, sophgo_dw_pcie_irq_handler, pcie); ++ ++ return 0; ++} ++ ++static void sophgo_dw_pcie_msi_ack_irq(struct irq_data *d) ++{ ++ irq_chip_ack_parent(d); ++} ++ ++static void sophgo_dw_pcie_msi_mask_irq(struct irq_data *d) ++{ ++ pci_msi_mask_irq(d); ++ irq_chip_mask_parent(d); ++} ++ ++static void sophgo_dw_pcie_msi_unmask_irq(struct irq_data *d) ++{ ++ pci_msi_unmask_irq(d); ++ irq_chip_unmask_parent(d); ++} ++ ++static struct irq_chip sophgo_dw_pcie_msi_irq_chip = { ++ .name = "sophgo-dw-msi", ++ .irq_ack = sophgo_dw_pcie_msi_ack_irq, ++ .irq_mask = sophgo_dw_pcie_msi_mask_irq, ++ .irq_unmask = sophgo_dw_pcie_msi_unmask_irq, ++}; ++ ++static struct msi_domain_info sophgo_dw_pcie_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), ++ .chip = &sophgo_dw_pcie_msi_irq_chip, ++}; ++ 
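++/*
++ * Create the PCI MSI IRQ domain for this root port: it is stacked on the
++ * SoC-level MSI parent domain returned by sophgo_get_msi_irq_domain(), and
++ * uses the device-tree fwnode (or the ACPI companion when booting with
++ * ACPI and PCI quirks enabled).
++ */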
++static int sophgo_dw_pcie_msi_setup(struct dw_pcie_rp *pp) ++{ ++ struct irq_domain *irq_parent = sophgo_get_msi_irq_domain(); ++ struct sophgo_dw_pcie *pcie = to_sophgo_dw_pcie_from_pp(pp); ++ struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node); ++ ++#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) ++ if (!acpi_disabled) ++ fwnode = acpi_fwnode_handle(to_acpi_device(pcie->dev)); ++#endif ++ ++ pp->msi_domain = pci_msi_create_irq_domain(fwnode, ++ &sophgo_dw_pcie_msi_domain_info, ++ irq_parent); ++ if (!pp->msi_domain) { ++ dev_err(pcie->dev, "create msi irq domain failed\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++u32 sophgo_dw_pcie_read_ctrl(struct sophgo_dw_pcie *pcie, u32 reg, size_t size) ++{ ++ int ret = 0; ++ u32 val = 0; ++ ++ ret = dw_pcie_read(pcie->ctrl_reg_base + reg, size, &val); ++ if (ret) ++ dev_err(pcie->dev, "Read ctrl address failed\n"); ++ ++ return val; ++} ++ ++void sophgo_dw_pcie_write_ctrl(struct sophgo_dw_pcie *pcie, u32 reg, size_t size, u32 val) ++{ ++ int ret = 0; ++ ++ ret = dw_pcie_write(pcie->ctrl_reg_base + reg, size, val); ++ if (ret) ++ dev_err(pcie->dev, "Write ctrl address failed\n"); ++} ++ ++u32 sophgo_dw_pcie_read_dbi(struct sophgo_dw_pcie *pcie, u32 reg, size_t size) ++{ ++ int ret = 0; ++ u32 val = 0; ++ ++ ret = dw_pcie_read(pcie->dbi_base + reg, size, &val); ++ if (ret) ++ dev_err(pcie->dev, "Read DBI address failed\n"); ++ ++ return val; ++} ++ ++void sophgo_dw_pcie_write_dbi(struct sophgo_dw_pcie *pcie, u32 reg, size_t size, u32 val) ++{ ++ int ret = 0; ++ ++ ret = dw_pcie_write(pcie->dbi_base + reg, size, val); ++ if (ret) ++ dev_err(pcie->dev, "Write DBI address failed\n"); ++} ++ ++static int sophgo_dw_pcie_link_up(struct sophgo_dw_pcie *pcie) ++{ ++ u32 val = 0; ++ ++ val = sophgo_dw_pcie_readl_dbi(pcie, PCIE_PORT_DEBUG1); ++ return ((val & PCIE_PORT_DEBUG1_LINK_UP) && ++ (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); ++} ++ ++u32 sophgo_dw_pcie_readl_atu(struct sophgo_dw_pcie *pcie, u32 dir, u32 index, u32 reg) ++{ ++ void __iomem *base; ++ int ret = 0; ++ u32 val = 0; ++ ++ base = sophgo_dw_pcie_select_atu(pcie, dir, index); ++ ++ ret = dw_pcie_read(base + reg, 4, &val); ++ if (ret) ++ dev_err(pcie->dev, "Read ATU address failed\n"); ++ ++ return val; ++} ++ ++void sophgo_dw_pcie_writel_atu(struct sophgo_dw_pcie *pcie, u32 dir, u32 index, ++ u32 reg, u32 val) ++{ ++ void __iomem *base; ++ int ret = 0; ++ ++ base = sophgo_dw_pcie_select_atu(pcie, dir, index); ++ ++ ret = dw_pcie_write(base + reg, 4, val); ++ if (ret) ++ dev_err(pcie->dev, "Write ATU address failed\n"); ++} ++ ++static void sophgo_dw_pcie_disable_atu(struct sophgo_dw_pcie *pcie, u32 dir, int index) ++{ ++ sophgo_dw_pcie_writel_atu(pcie, dir, index, PCIE_ATU_REGION_CTRL2, 0); ++} ++ ++static int sophgo_dw_pcie_prog_outbound_atu(struct sophgo_dw_pcie *pcie, ++ int index, int type, u64 cpu_addr, ++ u64 pci_addr, u64 size) ++{ ++ u32 retries = 0; ++ u32 val = 0; ++ u64 limit_addr = 0; ++ u32 func = 0; ++ ++ //if (pci->ops && pci->ops->cpu_addr_fixup) ++ // cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); ++ ++ limit_addr = cpu_addr + size - 1; ++ ++ if ((limit_addr & ~pcie->region_limit) != (cpu_addr & ~pcie->region_limit) || ++ !IS_ALIGNED(cpu_addr, pcie->region_align) || ++ !IS_ALIGNED(pci_addr, pcie->region_align) || !size) { ++ return -EINVAL; ++ } ++ ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_LOWER_BASE, ++ lower_32_bits(cpu_addr)); ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_UPPER_BASE, ++ upper_32_bits(cpu_addr)); 
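++	/*
++	 * With the CPU-side base address programmed, set the window limit,
++	 * the target PCI address and the region control bits, then enable
++	 * the region and poll until the enable bit takes effect.
++	 */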
++ ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_LIMIT, ++ lower_32_bits(limit_addr)); ++ if (dw_pcie_ver_is_ge(pcie, 460A)) ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_UPPER_LIMIT, ++ upper_32_bits(limit_addr)); ++ ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_LOWER_TARGET, ++ lower_32_bits(pci_addr)); ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_UPPER_TARGET, ++ upper_32_bits(pci_addr)); ++ ++ val = type | PCIE_ATU_FUNC_NUM(func); ++ if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) && ++ dw_pcie_ver_is_ge(pcie, 460A)) ++ val |= PCIE_ATU_INCREASE_REGION_SIZE; ++ if (dw_pcie_ver_is(pcie, 490A)) ++ val |= PCIE_ATU_TD; ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_REGION_CTRL1, val); ++ ++ sophgo_dw_pcie_writel_atu_ob(pcie, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); ++ ++ /* ++ * Make sure ATU enable takes effect before any subsequent config ++ * and I/O accesses. ++ */ ++ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { ++ val = sophgo_dw_pcie_readl_atu_ob(pcie, index, PCIE_ATU_REGION_CTRL2); ++ if (val & PCIE_ATU_ENABLE) ++ return 0; ++ ++ mdelay(LINK_WAIT_IATU); ++ } ++ ++ dev_err(pcie->dev, "Outbound iATU is not being enabled\n"); ++ ++ return -ETIMEDOUT; ++} ++ ++static int sophgo_dw_pcie_prog_inbound_atu(struct sophgo_dw_pcie *pcie, int index, int type, ++ u64 cpu_addr, u64 pci_addr, u64 size) ++{ ++ u64 limit_addr = pci_addr + size - 1; ++ u32 retries = 0; ++ u32 val = 0; ++ ++ if ((limit_addr & ~pcie->region_limit) != (pci_addr & ~pcie->region_limit) || ++ !IS_ALIGNED(cpu_addr, pcie->region_align) || ++ !IS_ALIGNED(pci_addr, pcie->region_align) || !size) { ++ return -EINVAL; ++ } ++ ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_LOWER_BASE, ++ lower_32_bits(pci_addr)); ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_UPPER_BASE, ++ upper_32_bits(pci_addr)); ++ ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_LIMIT, ++ lower_32_bits(limit_addr)); ++ if (dw_pcie_ver_is_ge(pcie, 460A)) ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_UPPER_LIMIT, ++ upper_32_bits(limit_addr)); ++ ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_LOWER_TARGET, ++ lower_32_bits(cpu_addr)); ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_UPPER_TARGET, ++ upper_32_bits(cpu_addr)); ++ ++ val = type; ++ if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) && ++ dw_pcie_ver_is_ge(pcie, 460A)) ++ val |= PCIE_ATU_INCREASE_REGION_SIZE; ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_REGION_CTRL1, val); ++ sophgo_dw_pcie_writel_atu_ib(pcie, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); ++ ++ /* ++ * Make sure ATU enable takes effect before any subsequent config ++ * and I/O accesses. 
++ */ ++ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { ++ val = sophgo_dw_pcie_readl_atu_ib(pcie, index, PCIE_ATU_REGION_CTRL2); ++ if (val & PCIE_ATU_ENABLE) ++ return 0; ++ ++ mdelay(LINK_WAIT_IATU); ++ } ++ ++ dev_err(pcie->dev, "Inbound iATU is not being enabled\n"); ++ ++ return -ETIMEDOUT; ++} ++static void __iomem *sophgo_dw_pcie_other_conf_map_bus(struct pci_bus *bus, ++ unsigned int devfn, int where) ++{ ++ struct dw_pcie_rp *pp = bus->sysdata; ++ struct sophgo_dw_pcie *pcie = to_sophgo_dw_pcie_from_pp(pp); ++ ++ if (!acpi_disabled) { ++ struct pci_config_window *cfg = bus->sysdata; ++ ++ pp = cfg->priv; ++ pcie = to_sophgo_dw_pcie_from_pp(pp); ++ } ++ ++ int type = 0; ++ int ret = 0; ++ u32 busdev = 0; ++ ++ /* ++ * Checking whether the link is up here is a last line of defense ++ * against platforms that forward errors on the system bus as ++ * SError upon PCI configuration transactions issued when the link ++ * is down. This check is racy by definition and does not stop ++ * the system from triggering an SError if the link goes down ++ * after this check is performed. ++ */ ++ if (!sophgo_dw_pcie_link_up(pcie)) ++ return NULL; ++ ++ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | ++ PCIE_ATU_FUNC(PCI_FUNC(devfn)); ++ ++ if (pci_is_root_bus(bus->parent)) ++ type = PCIE_ATU_TYPE_CFG0; ++ else ++ type = PCIE_ATU_TYPE_CFG1; ++ ++ ret = sophgo_dw_pcie_prog_outbound_atu(pcie, 0, type, pp->cfg0_base, busdev, ++ pp->cfg0_size); ++ if (ret) ++ return NULL; ++ ++ return pp->va_cfg0_base + where; ++} ++ ++static int sophgo_dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++{ ++ struct dw_pcie_rp *pp = bus->sysdata; ++ struct sophgo_dw_pcie *pcie = to_sophgo_dw_pcie_from_pp(pp); ++ int ret = 0; ++ ++ if (!acpi_disabled) { ++ struct pci_config_window *cfg = bus->sysdata; ++ ++ pp = cfg->priv; ++ pcie = to_sophgo_dw_pcie_from_pp(pp); ++ } ++ ++ ret = pci_generic_config_read(bus, devfn, where, size, val); ++ if (ret != PCIBIOS_SUCCESSFUL) ++ return ret; ++ ++ if (pp->cfg0_io_shared) { ++ ret = sophgo_dw_pcie_prog_outbound_atu(pcie, 0, PCIE_ATU_TYPE_IO, ++ pp->io_base, pp->io_bus_addr, ++ pp->io_size); ++ if (ret) ++ return PCIBIOS_SET_FAILED; ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int sophgo_dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ struct dw_pcie_rp *pp = bus->sysdata; ++ struct sophgo_dw_pcie *pcie = to_sophgo_dw_pcie_from_pp(pp); ++ int ret = 0; ++ ++ if (!acpi_disabled) { ++ struct pci_config_window *cfg = bus->sysdata; ++ ++ pp = cfg->priv; ++ pcie = to_sophgo_dw_pcie_from_pp(pp); ++ } ++ ++ ret = pci_generic_config_write(bus, devfn, where, size, val); ++ if (ret != PCIBIOS_SUCCESSFUL) ++ return ret; ++ ++ if (pp->cfg0_io_shared) { ++ ret = sophgo_dw_pcie_prog_outbound_atu(pcie, 0, PCIE_ATU_TYPE_IO, ++ pp->io_base, pp->io_bus_addr, ++ pp->io_size); ++ if (ret) ++ return PCIBIOS_SET_FAILED; ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static void __iomem *sophgo_dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) ++{ ++ struct dw_pcie_rp *pp = bus->sysdata; ++ struct sophgo_dw_pcie *pcie = to_sophgo_dw_pcie_from_pp(pp); ++ ++ if (!acpi_disabled) { ++ struct pci_config_window *cfg = bus->sysdata; ++ ++ pp = cfg->priv; ++ pcie = to_sophgo_dw_pcie_from_pp(pp); ++ } ++ ++ if (pci_is_root_bus(bus)) { ++ if (PCI_SLOT(devfn) > 0) ++ return NULL; ++ return pcie->dbi_base + where; ++ } ++ ++ return 
sophgo_dw_pcie_other_conf_map_bus(bus, devfn, where); ++} ++ ++static struct pci_ops sophgo_dw_child_pcie_ops = { ++ .map_bus = sophgo_dw_pcie_other_conf_map_bus, ++ .read = sophgo_dw_pcie_rd_other_conf, ++ .write = sophgo_dw_pcie_wr_other_conf, ++}; ++ ++static struct pci_ops sophgo_dw_pcie_ops = { ++ .map_bus = sophgo_dw_pcie_own_conf_map_bus, ++ .read = pci_generic_config_read, ++ .write = pci_generic_config_write, ++}; ++ ++static int sophgo_dw_pcie_get_resources(struct sophgo_dw_pcie *pcie) ++{ ++ struct platform_device *pdev = to_platform_device(pcie->dev); ++ struct device *dev = pcie->dev; ++ struct device_node *np = dev_of_node(pcie->dev); ++ struct resource *res; ++ uint64_t start_addr; ++ uint64_t size; ++ int ret; ++ ++ if (device_property_present(dev, "pcie-card")) ++ pcie->pcie_card = 1; ++ ++ if (!pcie->dbi_base) { ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); ++ pcie->dbi_base = devm_pci_remap_cfg_resource(pcie->dev, res); ++ if (IS_ERR(pcie->dbi_base)) ++ return PTR_ERR(pcie->dbi_base); ++ } ++ ++ if (!pcie->ctrl_reg_base) { ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base"); ++ pcie->ctrl_reg_base = devm_pci_remap_cfg_resource(pcie->dev, res); ++ if (IS_ERR(pcie->ctrl_reg_base)) ++ return PTR_ERR(pcie->ctrl_reg_base); ++ pcie->ctrl_reg_base += 0xc00; ++ } ++ ++ /* For non-unrolled iATU/eDMA platforms this range will be ignored */ ++ if (!pcie->atu_base) { ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); ++ if (res) { ++ pcie->atu_size = resource_size(res); ++ pcie->atu_base = devm_ioremap_resource(pcie->dev, res); ++ if (IS_ERR(pcie->atu_base)) ++ return PTR_ERR(pcie->atu_base); ++ } else { ++ pcie->atu_base = pcie->dbi_base + DEFAULT_DBI_ATU_OFFSET; ++ } ++ } ++ ++ /* Set a default value suitable for at most 8 in and 8 out windows */ ++ if (!pcie->atu_size) ++ pcie->atu_size = SZ_4K; ++ ++ if (pcie->link_gen < 1) { ++ pcie->link_gen = of_pci_get_max_link_speed(np); ++ pcie->link_gen += 1; ++ } ++ ++ of_property_read_u32(np, "num-lanes", &pcie->num_lanes); ++ ++ if (of_property_read_bool(np, "snps,enable-cdm-check")) ++ dw_pcie_cap_set(pcie, CDM_CHECK); ++ ++ if (pcie->pcie_card) { ++ if (!pcie->sii_reg_base) ++ pcie->sii_reg_base = pcie->ctrl_reg_base - 0x800; ++ ++ if (!pcie->c2c_top) { ++ ret = of_property_read_u64_index(np, "c2c_top", 0, &start_addr); ++ ret = of_property_read_u64_index(np, "c2c_top", 1, &size); ++ if (ret) { ++ dev_err(dev, "no c2c top find\n"); ++ } else { ++ pcie->c2c_top = devm_ioremap(dev, start_addr, size); ++ if (!pcie->c2c_top) { ++ pr_err("c2c top base ioremap failed\n"); ++ return PTR_ERR(pcie->c2c_top); ++ } ++ } ++ } ++ ++ ret = of_property_read_u64_index(np, "cfg_range", 0, &pcie->cfg_start_addr); ++ ret = of_property_read_u64_index(np, "cfg_range", 1, &pcie->cfg_end_addr); ++ if (ret == 0) ++ dev_err(dev, "cfg[0x%llx-0x%llx]\n", pcie->cfg_start_addr, pcie->cfg_end_addr); ++ ++ ret = of_property_read_u64_index(np, "slv_range", 0, &pcie->slv_start_addr); ++ ret = of_property_read_u64_index(np, "slv_range", 1, &pcie->slv_end_addr); ++ if (ret == 0) ++ dev_err(dev, "slv[0x%llx-0x%llx]\n", pcie->slv_start_addr, pcie->slv_end_addr); ++ ++ ret = of_property_read_u64_index(np, "dw_range", 0, &pcie->dw_start); ++ ret = of_property_read_u64_index(np, "dw_range", 1, &pcie->dw_end); ++ if (ret == 0) ++ dev_err(dev, "dw[0x%llx-0x%llx]\n", pcie->dw_start, pcie->dw_end); ++ ++ ret = of_property_read_u64_index(np, "up_start_addr", 0, &pcie->up_start_addr); ++ if (ret == 0) ++ dev_err(dev, 
"up start addr:[0x%llx]\n", pcie->up_start_addr); ++ ++ pcie->phy = devm_of_phy_get(dev, dev->of_node, "pcie-phy"); ++ ++ pcie->pe_rst = of_get_named_gpio(dev->of_node, "prst", 0); //TODO:default high? or low? ++ dev_err(dev, "perst:[gpio%d]\n", pcie->pe_rst); ++ ++ if (device_property_present(dev, "c2c0_x8_1") || device_property_present(dev, "c2c1_x8_1")) ++ pcie->pcie_route_config = C2C_PCIE_X8_1; ++ else if (device_property_present(dev, "c2c0_x8_0") || device_property_present(dev, "c2c1_x8_0")) ++ pcie->pcie_route_config = C2C_PCIE_X8_0; ++ else if (device_property_present(dev, "c2c0_x4_1") || device_property_present(dev, "c2c1_x4_1")) ++ pcie->pcie_route_config = C2C_PCIE_X4_1; ++ else if (device_property_present(dev, "c2c0_x4_0") || device_property_present(dev, "c2c1_x4_0")) ++ pcie->pcie_route_config = C2C_PCIE_X4_0; ++ else if (device_property_present(dev, "cxp_x8")) ++ pcie->pcie_route_config = CXP_PCIE_X8; ++ else if (device_property_present(dev, "cxp_x4")) ++ pcie->pcie_route_config = CXP_PCIE_X4; ++ else ++ dev_err(dev, "error pcie type\n"); ++ ++ ret = of_property_read_u64_index(np, "cdma-reg", 0, &pcie->cdma_pa_start); ++ ret = of_property_read_u64_index(np, "cdma-reg", 1, &pcie->cdma_size); ++ if (ret) { ++ pr_err("cdma reg not found\n"); ++ return -1; ++ } ++ pcie->cdma_reg_base = devm_ioremap(dev, pcie->cdma_pa_start, pcie->cdma_size); ++ if (!pcie->cdma_reg_base) { ++ dev_err(dev, "failed to map cdma reg\n"); ++ return -1; ++ } ++ ++ if (of_device_is_compatible(np, "sophgo,bm1690-c2c-pcie-host")) { ++ pcie->c2c_pcie_rc = 1; ++ dev_err(dev, "probe c2c pcie host\n"); ++ } ++ } ++ ++ return 0; ++} ++ ++#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) ++static int sophgo_dw_pcie_get_resources_acpi(struct sophgo_dw_pcie *pcie, struct resource *res) ++{ ++ struct device_node *np = dev_of_node(pcie->dev); ++ ++ pcie->dbi_base = devm_pci_remap_cfg_resource(pcie->dev, &res[0]); ++ if (IS_ERR(pcie->dbi_base)) ++ return PTR_ERR(pcie->dbi_base); ++ ++ pcie->ctrl_reg_base = devm_pci_remap_cfg_resource(pcie->dev, &res[1]); ++ if (IS_ERR(pcie->ctrl_reg_base)) ++ return PTR_ERR(pcie->ctrl_reg_base); ++ ++ pcie->atu_size = resource_size(&res[2]); ++ pcie->atu_base = devm_ioremap_resource(pcie->dev, &res[2]); ++ if (IS_ERR(pcie->atu_base)) ++ return PTR_ERR(pcie->atu_base); ++ ++ /* Set a default value suitable for at most 8 in and 8 out windows */ ++ if (!pcie->atu_size) ++ pcie->atu_size = SZ_4K; ++ ++ if (pcie->link_gen < 1) { ++ pcie->link_gen = of_pci_get_max_link_speed(np); ++ pcie->link_gen += 1; ++ } ++ ++ return 0; ++} ++#endif ++ ++static int sophgo_dw_pcie_setup_outbound_atu(struct sophgo_dw_pcie *pcie, ++ struct list_head *list) ++{ ++ struct resource_entry *entry; ++ int i, ret = 0; ++ ++ resource_list_for_each_entry(entry, list) { ++ if (resource_type(entry->res) != IORESOURCE_MEM) ++ continue; ++ ++ if (pcie->num_ob_windows <= ++i) ++ break; ++ ++ ret = sophgo_dw_pcie_prog_outbound_atu(pcie, i, PCIE_ATU_TYPE_MEM, ++ entry->res->start, ++ entry->res->start - entry->offset, ++ resource_size(entry->res)); ++ if (ret) { ++ dev_err(pcie->dev, "Failed to set MEM range %pr\n", entry->res); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int sophgo_dw_pcie_setup_inbound_atu(struct sophgo_dw_pcie *pcie, ++ struct list_head *list_dma) ++{ ++ struct resource_entry *entry; ++ int i, ret = 0; ++ struct bus_dma_region *r, **map; ++ u64 end, mask; ++ struct device *dev = pcie->dev; ++ ++ map = &r; ++ r = kcalloc(pcie->num_ib_windows + 1, sizeof(*r), GFP_KERNEL); ++ if (!r) ++ 
return -ENOMEM; ++ ++ resource_list_for_each_entry(entry, list_dma) { ++ if (resource_type(entry->res) != IORESOURCE_MEM) ++ continue; ++ ++ if (pcie->num_ib_windows <= i) ++ break; ++ ++ ret = sophgo_dw_pcie_prog_inbound_atu(pcie, i++, PCIE_ATU_TYPE_MEM, ++ entry->res->start, ++ entry->res->start - entry->offset, ++ resource_size(entry->res)); ++ if (ret) { ++ dev_err(pcie->dev, "Failed to set DMA range %pr\n", entry->res); ++ kfree(r); ++ return ret; ++ } ++ ++ r->cpu_start = entry->res->start; ++ r->dma_start = entry->res->start - entry->offset; ++ r->size = resource_size(entry->res); ++ r++; ++ ++ end = dma_range_map_max(*map); ++ mask = DMA_BIT_MASK(ilog2(end) + 1); ++ dev->bus_dma_limit = end; ++ dev->coherent_dma_mask &= mask; ++ if (dev->dma_mask) ++ *dev->dma_mask &= mask; ++ } ++ ++ return 0; ++} ++ ++static int sophgo_dw_pcie_iatu_setup(struct dw_pcie_rp *pp) ++{ ++ struct sophgo_dw_pcie *pcie = to_sophgo_dw_pcie_from_pp(pp); ++ struct list_head *list, *list_dma; ++ int i, ret = 0; ++ ++#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) ++ if (!acpi_disabled) { ++ struct acpi_device *adev = to_acpi_device(pcie->dev); ++ unsigned long flags; ++ struct list_head resource_list; ++ ++ INIT_LIST_HEAD(&resource_list); ++ flags = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_BUS; ++ ret = acpi_dev_get_resources(adev, &resource_list, acpi_dev_filter_resource_type_cb, (void *)flags); ++ if (ret < 0) { ++ pr_err("failed to parse _CRS method, error code %d\n", ret); ++ return ret; ++ } else if (ret == 0) { ++ pr_err("no IO and memory resources present in _CRS\n"); ++ return -EINVAL; ++ } ++ list = &resource_list; ++ ++ } else ++ list = &pp->bridge->windows; ++#else ++ list = &pp->bridge->windows; ++#endif ++ /* Note the very first outbound ATU is used for CFG IOs */ ++ if (!pcie->num_ob_windows) { ++ dev_err(pcie->dev, "No outbound iATU found\n"); ++ return -EINVAL; ++ } ++ ++ /* ++ * Ensure all out/inbound windows are disabled before proceeding with ++ * the MEM/IO (dma-)ranges setups. ++ */ ++ for (i = 0; i < pcie->num_ob_windows; i++) ++ sophgo_dw_pcie_disable_atu(pcie, PCIE_ATU_REGION_DIR_OB, i); ++ ++ for (i = 0; i < pcie->num_ib_windows; i++) ++ sophgo_dw_pcie_disable_atu(pcie, PCIE_ATU_REGION_DIR_IB, i); ++ ++ /* Setup outbound ATU */ ++ ret = sophgo_dw_pcie_setup_outbound_atu(pcie, list); ++ if (ret) ++ return ret; ++ ++#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) ++ /* Setup inbound ATU */ ++ if (!acpi_disabled) { ++ struct acpi_device *adev = to_acpi_device(pcie->dev); ++ unsigned long flags; ++ struct list_head resource_dma_list; ++ ++ INIT_LIST_HEAD(&resource_dma_list); ++ flags = IORESOURCE_DMA; ++ ret = acpi_dev_get_dma_resources(adev, &resource_dma_list); ++ if (ret < 0) { ++ pr_err("failed to parse _DMA method, error code %d\n", ret); ++ return ret; ++ } else if (ret == 0) { ++ pr_err("no memory resources present in _DMA\n"); ++ return -EINVAL; ++ } ++ ++ list_dma = &resource_dma_list; ++ } else ++ list_dma = &pp->bridge->dma_ranges; ++#else ++ list_dma = &pp->bridge->dma_ranges; ++#endif ++ ++ ret = sophgo_dw_pcie_setup_inbound_atu(pcie, list_dma); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int sophgo_dw_pcie_setup_rc(struct dw_pcie_rp *pp) ++{ ++ struct sophgo_dw_pcie *pcie = to_sophgo_dw_pcie_from_pp(pp); ++ struct device *dev = pcie->dev; ++ u32 val = 0; ++ u32 ctrl = 0; ++ u32 num_ctrls = 0; ++ int ret = 0; ++ ++ /* ++ * Enable DBI read-only registers for writing/updating configuration. 
++ * Write permission gets disabled towards the end of this function. ++ */ ++ sophgo_dw_pcie_dbi_ro_wr_en(pcie); ++ ++ if (pci_msi_enabled()) { ++ pp->has_msi_ctrl = !(of_property_read_bool(dev->of_node, "msi-parent")); ++ if (pp->has_msi_ctrl) { ++ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; ++ ++ /* Initialize IRQ Status array */ ++ for (ctrl = 0; ctrl < num_ctrls; ctrl++) { ++ sophgo_dw_pcie_writel_dbi(pcie, PCIE_MSI_INTR0_MASK + ++ (ctrl * MSI_REG_CTRL_BLOCK_SIZE), ++ pp->irq_mask[ctrl]); ++ sophgo_dw_pcie_writel_dbi(pcie, PCIE_MSI_INTR0_ENABLE + ++ (ctrl * MSI_REG_CTRL_BLOCK_SIZE), ++ ~0); ++ } ++ } ++ } ++ ++ ret = sophgo_dw_pcie_msi_setup(pp); ++ if (ret) ++ return ret; ++ ++ /* Setup RC BARs */ ++ //sophgo_dw_pcie_writel_dbi(pcie, PCI_BASE_ADDRESS_0, 0x00000004); ++ //sophgo_dw_pcie_writel_dbi(pcie, PCI_BASE_ADDRESS_1, 0x00000000); ++ ++ /* Setup interrupt pins */ ++ //val = sophgo_dw_pcie_readl_dbi(pcie, PCI_INTERRUPT_LINE); ++ //val &= 0xffff00ff; ++ //val |= 0x00000100; ++ //sophgo_dw_pcie_writel_dbi(pcie, PCI_INTERRUPT_LINE, val); ++ ++ /* Setup bus numbers */ ++ val = sophgo_dw_pcie_readl_dbi(pcie, PCI_PRIMARY_BUS); ++ val &= 0xff000000; ++ val |= 0x00ff0100; ++ sophgo_dw_pcie_writel_dbi(pcie, PCI_PRIMARY_BUS, val); ++ ++ /* Setup command register */ ++ val = sophgo_dw_pcie_readl_dbi(pcie, PCI_COMMAND); ++ val &= 0xffff0000; ++ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | ++ PCI_COMMAND_MASTER | PCI_COMMAND_SERR; ++ sophgo_dw_pcie_writel_dbi(pcie, PCI_COMMAND, val); ++ ++ /* ++ * If the platform provides its own child bus config accesses, it means ++ * the platform uses its own address translation component rather than ++ * ATU, so we should not program the ATU here. ++ */ ++ if (!acpi_disabled) { ++ ret = sophgo_dw_pcie_iatu_setup(pp); ++ if (ret) ++ return ret; ++ } else if (pp->bridge->child_ops == &sophgo_dw_child_pcie_ops) { ++ ret = sophgo_dw_pcie_iatu_setup(pp); ++ if (ret) ++ return ret; ++ } ++ ++ //sophgo_dw_pcie_writel_dbi(pcie, PCI_BASE_ADDRESS_0, 0); ++ ++ /* Program correct class for RC */ ++ sophgo_dw_pcie_writew_dbi(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); ++ ++ //val = sophgo_dw_pcie_readl_dbi(pcie, PCIE_LINK_WIDTH_SPEED_CONTROL); ++ //val |= PORT_LOGIC_SPEED_CHANGE; ++ //sophgo_dw_pcie_writel_dbi(pcie, PCIE_LINK_WIDTH_SPEED_CONTROL, val); ++ ++ sophgo_dw_pcie_dbi_ro_wr_dis(pcie); ++ ++ return 0; ++} ++ ++static void sophgo_dw_pcie_version_detect(struct sophgo_dw_pcie *pcie) ++{ ++ u32 ver = 0; ++ ++ /* The content of the CSR is zero on DWC PCIe older than v4.70a */ ++ ver = sophgo_dw_pcie_readl_dbi(pcie, PCIE_VERSION_NUMBER); ++ if (!ver) ++ return; ++ ++ if (pcie->version && pcie->version != ver) ++ dev_warn(pcie->dev, "Versions don't match (%08x != %08x)\n", ++ pcie->version, ver); ++ else ++ pcie->version = ver; ++ ++ ver = sophgo_dw_pcie_readl_dbi(pcie, PCIE_VERSION_TYPE); ++ ++ if (pcie->type && pcie->type != ver) ++ dev_warn(pcie->dev, "Types don't match (%08x != %08x)\n", ++ pcie->type, ver); ++ else ++ pcie->type = ver; ++} ++ ++static void sophgo_dw_pcie_iatu_detect(struct sophgo_dw_pcie *pcie) ++{ ++ int max_region = 0; ++ int ob = 0; ++ int ib = 0; ++ u32 val = 0; ++ u32 min = 0; ++ u32 dir = 0; ++ u64 max = 0; ++ ++ val = sophgo_dw_pcie_readl_dbi(pcie, PCIE_ATU_VIEWPORT); ++ if (val == 0xFFFFFFFF) { ++ dw_pcie_cap_set(pcie, IATU_UNROLL); ++ ++ max_region = min((int)pcie->atu_size / 512, 256); ++ } else { ++ pcie->atu_base = pcie->dbi_base + PCIE_ATU_VIEWPORT_BASE; ++ pcie->atu_size = PCIE_ATU_VIEWPORT_SIZE; ++ ++ 
sophgo_dw_pcie_writel_dbi(pcie, PCIE_ATU_VIEWPORT, 0xFF); ++ max_region = sophgo_dw_pcie_readl_dbi(pcie, PCIE_ATU_VIEWPORT) + 1; ++ } ++ ++ for (ob = 0; ob < max_region; ob++) { ++ sophgo_dw_pcie_writel_atu_ob(pcie, ob, PCIE_ATU_LOWER_TARGET, 0x11110000); ++ val = sophgo_dw_pcie_readl_atu_ob(pcie, ob, PCIE_ATU_LOWER_TARGET); ++ if (val != 0x11110000) ++ break; ++ } ++ ++ for (ib = 0; ib < max_region; ib++) { ++ sophgo_dw_pcie_writel_atu_ib(pcie, ib, PCIE_ATU_LOWER_TARGET, 0x11110000); ++ val = sophgo_dw_pcie_readl_atu_ib(pcie, ib, PCIE_ATU_LOWER_TARGET); ++ if (val != 0x11110000) ++ break; ++ } ++ ++ if (ob) { ++ dir = PCIE_ATU_REGION_DIR_OB; ++ } else if (ib) { ++ dir = PCIE_ATU_REGION_DIR_IB; ++ } else { ++ dev_err(pcie->dev, "No iATU regions found\n"); ++ return; ++ } ++ ++ sophgo_dw_pcie_writel_atu(pcie, dir, 0, PCIE_ATU_LIMIT, 0x0); ++ min = sophgo_dw_pcie_readl_atu(pcie, dir, 0, PCIE_ATU_LIMIT); ++ ++ if (dw_pcie_ver_is_ge(pcie, 460A)) { ++ sophgo_dw_pcie_writel_atu(pcie, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF); ++ max = sophgo_dw_pcie_readl_atu(pcie, dir, 0, PCIE_ATU_UPPER_LIMIT); ++ } else { ++ max = 0; ++ } ++ ++ pcie->num_ob_windows = ob; ++ pcie->num_ib_windows = ib; ++ pcie->region_align = 1 << fls(min); ++ pcie->region_limit = (max << 32) | (SZ_4G - 1); ++ ++ dev_info(pcie->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n", ++ dw_pcie_cap_is(pcie, IATU_UNROLL) ? "T" : "F", ++ pcie->num_ob_windows, pcie->num_ib_windows, ++ pcie->region_align / SZ_1K, (pcie->region_limit + 1) / SZ_1G); ++} ++ ++static int pcie_config_eq(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ uint32_t speed = 0; ++ uint32_t pset_id = 0; ++ void __iomem *pcie_dbi_base = pcie->dbi_base; ++ struct PCIE_EQ_COEF eq_coef_tbl[11] = { //p0 ~ p10 ++ {36, 0, 12}, {40, 0, 8}, {38, 0, 10}, {42, 0, 6}, {48, 0, 0}, {44, 4, 0}, ++ {42, 6, 0}, {34, 5, 9}, {36, 6, 6}, {40, 8, 0}, {32, 0, 16} ++ }; ++ ++ for (speed = 0; speed < 3; speed++) { ++ val = readl(pcie_dbi_base + 0x890); //set speed ++ val &= 0xfcffffff; ++ val |= (speed << 24); ++ writel(val, (pcie_dbi_base + 0x890)); ++ ++ val = readl(pcie_dbi_base + 0x894); ++ val &= 0xfffff000; //bit[11, 0] ++ val |= 16; ++ val |= (48 << 6); ++ writel(val, (pcie_dbi_base + 0x894)); ++ ++ for (pset_id = 0; pset_id < 11; pset_id++) { ++ val = readl(pcie_dbi_base + 0x89c); ++ val &= 0xfffffff0; //bit[3, 0] ++ val |= pset_id; ++ writel(val, (pcie_dbi_base + 0x89c)); ++ ++ val = readl(pcie_dbi_base + 0x898); ++ val &= 0xfffc0000; //bit[17, 0] ++ val |= eq_coef_tbl[pset_id].pre_cursor; ++ val |= (eq_coef_tbl[pset_id].cursor << 6); ++ val |= (eq_coef_tbl[pset_id].post_cursor << 12); ++ writel(val, (pcie_dbi_base + 0x898)); ++ ++ val = readl(pcie_dbi_base + 0x8a4); ++ if (val & 0x1) //bit0 ++ pr_info("illegal coef pragrammed, speed[%d], pset[%d].\n", speed, pset_id); ++ } ++ } ++ ++ return 0; ++} ++ ++static int pcie_config_link(struct sophgo_dw_pcie *pcie) ++{ ++ void __iomem *dbi_base = pcie->dbi_base; ++ uint32_t val; ++ ++ //config lane_count ++ val = readl(dbi_base + 0x8c0); ++ val = (val & 0xffffffc0) | pcie->num_lanes; ++ writel(val, (dbi_base + 0x8c0)); ++ ++ //config eq bypass highest rate disable ++ val = readl(dbi_base + 0x1c0); ++ val |= 0x1; ++ writel(val, (dbi_base + 0x1c0)); ++ ++ return 0; ++} ++ ++static int pcie_enable_ltssm(struct sophgo_dw_pcie *pcie) ++{ ++ void __iomem *sii_reg_base = pcie->sii_reg_base; ++ uint32_t val; ++ ++ val = readl(sii_reg_base + PCIE_SII_GENERAL_CTRL3_REG); ++ val |= 0x1; ++ writel(val, sii_reg_base + 
PCIE_SII_GENERAL_CTRL3_REG); ++ ++ return 0; ++} ++ ++static int pcie_wait_link(struct sophgo_dw_pcie *pcie) ++{ ++ void __iomem *sii_reg_base = pcie->sii_reg_base; ++ uint32_t status; ++ int err; ++ int timeout = 500; ++ ++ err = readl_poll_timeout(sii_reg_base + 0xb4, status, ((status >> 7) & 1), 20, timeout * USEC_PER_MSEC); ++ if (err) { ++ pr_err("[sg2260] failed to poll link ready\n"); ++ return -ETIMEDOUT; ++ } ++ ++ return 0; ++} ++ ++static int pcie_config_ctrl(struct sophgo_dw_pcie *pcie) ++{ ++ void __iomem *dbi_reg_base = pcie->dbi_base; ++ void __iomem *sii_reg_base = pcie->sii_reg_base; ++ uint32_t val; ++ ++ //config device_type ++ val = readl(sii_reg_base + PCIE_SII_GENERAL_CTRL1_REG); ++ val &= (~PCIE_SII_GENERAL_CTRL1_DEVICE_TYPE_MASK); ++ val |= (4 << 9);//RC MODE ++ writel(val, (sii_reg_base + PCIE_SII_GENERAL_CTRL1_REG)); ++ ++ //config Directed Speed Change Writing '1' to this field instructs the LTSSM to initiate ++ //a speed change to Gen2 or Gen3 after the link is initialized at Gen1 speed ++ val = readl(dbi_reg_base + 0x80c); ++ val = val | 0x20000; ++ writel(val, (dbi_reg_base + 0x80c)); ++ ++ //config generation_select-pcie_cap_target_link_speed ++ val = readl(dbi_reg_base + 0xa0); ++ val = (val & 0xfffffff0) | pcie->link_gen; ++ writel(val, (dbi_reg_base + 0xa0)); ++ ++ // config ecrc generation enable ++ val = readl(dbi_reg_base + 0x118); ++ val = 0x3e0; ++ writel(val, (dbi_reg_base + 0x118)); ++ ++ return 0; ++} ++ ++static int pcie_check_link_status(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ uint32_t speed = 0; ++ uint32_t width = 0; ++ uint32_t ltssm_state = 0; ++ void __iomem *pcie_sii_base = pcie->sii_reg_base; ++ void __iomem *pcie_dbi_base = pcie->dbi_base; ++ ++ val = readl(pcie_sii_base + 0xb4); //LNK_DBG_2 ++ ltssm_state = val & 0x3f; //bit[5,0] ++ if (ltssm_state != 0x11) ++ pr_err("PCIe link fail, ltssm_state = 0x%x\n", ltssm_state); ++ ++ speed = (val >> 8) & 0x7; //bit[10,8] ++ if ((speed + 1) != pcie->link_gen) ++ pr_err("link speed, expect gen%d, current gen%d\n", pcie->link_gen, (speed + 1)); ++ ++ val = readl(pcie_dbi_base + 0x80); ++ width = (val >> 20) & 0x3f; //bit[25:20] ++ if (width != pcie->num_lanes) ++ pr_err("link width, expect x%d, current x%d\n", pcie->num_lanes, width); ++ ++ pr_info("PCIe Link status, ltssm[0x%x], gen%d, x%d.\n", ltssm_state, (speed + 1), width); ++ ++ return 0; ++} ++ ++static int pcie_config_soft_phy_reset(struct sophgo_dw_pcie *pcie, uint32_t rst_status) ++{ ++ uint32_t val = 0; ++ void __iomem *reg_base; ++ ++ //deassert = 1; assert = 0; ++ if ((rst_status != 0) && (rst_status != 1)) ++ return -1; ++ ++ reg_base = pcie->ctrl_reg_base; ++ ++ //cfg soft_phy_rst_n , first cfg 1 ++ val = readl(reg_base + PCIE_CTRL_SFT_RST_SIG_REG); ++ if (rst_status == 1) ++ val |= (0x1 << PCIE_CTRL_SFT_RST_SIG_PHY_RSTN_BIT); ++ else ++ val &= (~PCIE_CTRL_SFT_RST_SIG_PHY_RSTN_BIT); ++ ++ writel(val, (reg_base + PCIE_CTRL_SFT_RST_SIG_REG)); ++ ++ udelay(1); ++ ++ return 0; ++} ++ ++static int pcie_config_soft_cold_reset(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ void __iomem *reg_base; ++ ++ ++ reg_base = pcie->ctrl_reg_base; ++ ++ //cfg soft_cold_rst_n , first cfg 0 ++ val = readl(reg_base + PCIE_CTRL_SFT_RST_SIG_REG); ++ val &= (~PCIE_CTRL_SFT_RST_SIG_COLD_RSTN_BIT); ++ writel(val, (reg_base + PCIE_CTRL_SFT_RST_SIG_REG)); ++ ++ //cfg soft_cold_rst_n , second cfg 1 ++ val = readl(reg_base + PCIE_CTRL_SFT_RST_SIG_REG); ++ val |= (0x1 << PCIE_CTRL_SFT_RST_SIG_COLD_RSTN_BIT); ++ writel(val, (reg_base + 
PCIE_CTRL_SFT_RST_SIG_REG)); ++ ++ return 0; ++} ++ ++static void pcie_check_radm_status(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ void __iomem *base_addr = pcie->ctrl_reg_base; ++ int timeout = 0; ++ ++ do { ++ udelay(30); ++ if (pcie->num_lanes == 8) { ++ val = readl(base_addr + 0xfc); ++ val = (val >> 29) & 0x1; //bit29, radm_idle ++ } else { ++ val = readl(base_addr + 0xe8); ++ val = (val >> 21) & 0x1; //bit21, radm_idle ++ } ++ timeout++; ++ if (timeout == 200) { ++ pr_err("failed check radm status\n"); ++ return; ++ } ++ } while (val != 1); ++} ++ ++static void pcie_clear_slv_mapping(struct sophgo_dw_pcie *pcie) ++{ ++ void __iomem *ctrl_reg_base = pcie->ctrl_reg_base; ++ ++ writel(0x0, (ctrl_reg_base + PCIE_CTRL_REMAPPING_EN_REG)); ++ writel(0x0, (ctrl_reg_base + PCIE_CTRL_SN_UP_START_ADDR_REG)); ++ writel(0x0, (ctrl_reg_base + PCIE_CTRL_SN_UP_END_ADDR_REG)); ++ writel(0x0, (ctrl_reg_base + PCIE_CTRL_SN_DW_ADDR_REG)); ++} ++ ++static void pcie_config_slv_mapping(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ void __iomem *ctrl_reg_base = pcie->ctrl_reg_base; ++ uint64_t up_start_addr = 0; ++ uint64_t up_end_addr = 0; ++ uint32_t dw_start_addr = 0; ++ uint32_t dw_end_addr = 0; ++ uint32_t full_addr = 0; ++ ++ up_start_addr = pcie->slv_start_addr; ++ up_end_addr = pcie->slv_end_addr; ++ ++ dw_start_addr = pcie->dw_start; ++ dw_end_addr = pcie->dw_end; ++ ++ full_addr = (((dw_end_addr >> 16) & 0xffff) << 16) | ((dw_start_addr >> 16) & 0xffff); ++ ++ val = readl(ctrl_reg_base + PCIE_CTRL_REMAPPING_EN_REG); ++ val |= 0x3 << PCIE_CTRL_REMAP_EN_SN_TO_PCIE_UP4G_EN_BIT; ++ writel(val, (ctrl_reg_base + PCIE_CTRL_REMAPPING_EN_REG)); ++ up_start_addr = up_start_addr >> 16; ++ writel((up_start_addr & 0xffffffff), (ctrl_reg_base + PCIE_CTRL_SN_UP_START_ADDR_REG)); ++ up_end_addr = up_end_addr >> 16; ++ writel((up_end_addr & 0xffffffff), (ctrl_reg_base + PCIE_CTRL_SN_UP_END_ADDR_REG)); ++ // cfg sn_to_pcie_dw4g_start_addr and end addr ++ writel(full_addr, (ctrl_reg_base + PCIE_CTRL_SN_DW_ADDR_REG)); ++} ++ ++static void pcie_config_mps(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ int mps = 1; ++ void __iomem *base_addr = pcie->dbi_base; ++ ++ val = readl(base_addr + 0x78); ++ val &= 0xffffff1f; ++ val |= ((mps & 0x7) << 5); ++ writel(val, (base_addr + 0x78)); ++} ++ ++static void pcie_config_mrrs(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ void __iomem *base_addr = pcie->dbi_base; ++ int mrrs = 1; ++ ++ val = readl(base_addr + 0x78); ++ val &= 0xffff8fff; ++ val |= ((mrrs & 0x7) << 12); ++ writel(val, (base_addr + 0x78)); ++} ++ ++ ++static void pcie_config_port_code(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t val = 0; ++ int c2c_id = 1; ++ ++ //config port code ++ if (c2c_id == 0) ++ val = (0xf) | (0x0 << 4) | (0xf << 8) | (0xf << 12) | (0xf << 16); ++ else if (c2c_id == 1) ++ val = (0x0) | (0x5 << 4) | (0xf << 8) | (0xf << 12) | (0x7 << 16); ++ ++ writel(val, pcie->c2c_top); ++} ++ ++static void pcie_config_cascade_rc_atu(struct sophgo_dw_pcie *pcie) ++{ ++ uint64_t up_start_addr = 0; ++ void __iomem *atu_base = pcie->atu_base; ++ ++ up_start_addr = pcie->up_start_addr; ++ writel(0x0, (atu_base + 0x1008)); //src ++ writel((up_start_addr >> 32), (atu_base + 0x100c)); ++ writel(0x3fffff, (atu_base + 0x1010)); //size 4M ++ writel((up_start_addr >> 32), (atu_base + 0x1020)); //size 4M ++ writel(0x24800000, (atu_base + 0x1014)); //target ++ writel(0x0, (atu_base + 0x1018)); ++ writel(0x2000, (atu_base + 0x1000)); ++ writel(0x80000000, (atu_base + 
0x1004)); ++ ++ writel(0x400000, (atu_base + 0x1208)); //src ++ writel((up_start_addr >> 32), (atu_base + 0x120c)); ++ writel(0x7fffff, (atu_base + 0x1210)); //size 4M ++ writel((up_start_addr >> 32), (atu_base + 0x1220)); //size 4M ++ writel(0x24c00000, (atu_base + 0x1214)); //target ++ writel(0x0, (atu_base + 0x1218)); ++ writel(0x2000, (atu_base + 0x1200)); ++ writel(0x80000000, (atu_base + 0x1204)); ++ ++ writel(0xa0000000, (atu_base + 0x1408)); //src ++ writel((up_start_addr >> 32), (atu_base + 0x140c)); ++ writel(0xa0ffffff, (atu_base + 0x1410)); //size 16M ++ writel((up_start_addr >> 32), (atu_base + 0x1420)); //size 16M ++ writel(0x00000000, (atu_base + 0x1414)); //target ++ writel(0x52, (atu_base + 0x1418)); ++ writel(0x2000, (atu_base + 0x1400)); ++ writel(0x80000000, (atu_base + 0x1404)); ++} ++ ++static void bm1690_pcie_init_route(struct sophgo_dw_pcie *pcie) ++{ ++ pcie_clear_slv_mapping(pcie); ++ pcie_config_slv_mapping(pcie); ++ pcie_config_mps(pcie); ++ pcie_config_mrrs(pcie); ++ ++ pcie_config_port_code(pcie); ++ pcie_config_cascade_rc_atu(pcie); ++} ++ ++static void pcie_config_axi_route(struct sophgo_dw_pcie *pcie) ++{ ++ uint64_t cfg_start_addr; ++ uint64_t cfg_end_addr; ++ ++ cfg_start_addr = pcie->cfg_start_addr; ++ cfg_end_addr = pcie->cfg_end_addr; ++ ++ ++ writel((cfg_start_addr & 0xffffffff), (pcie->c2c_top + 0x24)); ++ writel(((cfg_start_addr >> 32) & 0xffffffff), (pcie->c2c_top + 0x28)); ++ writel((cfg_end_addr & 0xffffffff), (pcie->c2c_top + 0x2c)); ++ writel(((cfg_end_addr >> 32) & 0xffffffff), (pcie->c2c_top + 0x30)); ++ //writel((C2C_PCIE_TOP_REG(c2c_id) + 0xcc), 0xf0000); ++} ++ ++static int sophgo_pcie_host_init_port(struct sophgo_dw_pcie *pcie) ++{ ++ int ret; ++ int err; ++ uint32_t val; ++ void __iomem *sii_reg_base = pcie->sii_reg_base; ++ int timeout = 0; ++ ++ phy_init(pcie->phy); ++ pcie_config_soft_phy_reset(pcie, PCIE_RST_ASSERT); ++ pcie_config_soft_phy_reset(pcie, PCIE_RST_DE_ASSERT); ++ pcie_config_soft_cold_reset(pcie); ++ ++ gpio_direction_output(pcie->pe_rst, 0); ++ gpio_set_value(pcie->pe_rst, 0); ++ msleep(1000); ++ gpio_set_value(pcie->pe_rst, 1); ++ ret = phy_configure(pcie->phy, NULL); ++ if (ret) { ++ dev_err(pcie->dev, "phy config failed\n"); ++ return ret; ++ } ++ ++ /*pcie wait core clk*/ ++ do { ++ val = readl(sii_reg_base + 0x5c); //GEN_CTRL_4 ++ val = (val >> 8) & 1; ++ if (val == 1) { ++ dev_err(pcie->dev, "phy config success\n"); ++ break; ++ } ++ timeout++; ++ msleep(10); ++ ++ if (timeout == 50) { ++ dev_err(pcie->dev, "wait core clk failed\n"); ++ return -1; ++ } ++ ++ } while (1); ++ ++ pcie_check_radm_status(pcie); ++ ++ pcie_config_ctrl(pcie); ++ pcie_config_eq(pcie); ++ pcie_config_link(pcie); ++ pcie_enable_ltssm(pcie); ++ ++ err = pcie_wait_link(pcie); ++ if (err) { ++ pr_err("pcie wait link failed\n"); ++ return err; ++ } ++ ++ pcie_check_link_status(pcie); ++ pcie_config_axi_route(pcie); ++ bm1690_pcie_init_route(pcie); ++ ++ return 0; ++} ++ ++static int sophgo_pcie_config_cdma_route(struct sophgo_dw_pcie *pcie) ++{ ++ uint32_t tmp; ++ ++ pr_err("cdma config, pcie route config:0x%x\n", pcie->pcie_route_config); ++ ++ tmp = (pcie->pcie_route_config << 28) | (pcie->cdma_pa_start >> 32); ++ writel(tmp, pcie->cdma_reg_base + CDMA_CSR_RCV_ADDR_H32); ++ ++ tmp = (pcie->cdma_pa_start & ((1ul << 32) - 1)) >> 16; ++ writel(tmp, pcie->cdma_reg_base + CDMA_CSR_RCV_ADDR_M16); ++ ++ // OS: 2 ++ tmp = readl(pcie->cdma_reg_base + CDMA_CSR_4) | (1 << CDMA_CSR_RCV_CMD_OS); ++ writel(tmp, pcie->cdma_reg_base + CDMA_CSR_4); ++ ++ tmp 
= (readl(pcie->cdma_reg_base + CDMA_CSR_INTER_DIE_RW) & ++ ~(0xff << CDMA_CSR_INTER_DIE_WRITE_ADDR_L4)) | ++ (pcie->pcie_route_config << CDMA_CSR_INTER_DIE_WRITE_ADDR_H4) | ++ (0b0000 << CDMA_CSR_INTER_DIE_WRITE_ADDR_L4); ++ writel(tmp, pcie->cdma_reg_base + CDMA_CSR_INTER_DIE_RW); ++ ++ tmp = (readl(pcie->cdma_reg_base + CDMA_CSR_INTRA_DIE_RW) & ++ ~(0xff << CDMA_CSR_INTRA_DIE_READ_ADDR_L4)) | ++ (AXI_RN << CDMA_CSR_INTRA_DIE_READ_ADDR_H4) | ++ (0b0000 << CDMA_CSR_INTRA_DIE_READ_ADDR_L4); ++ writel(tmp, pcie->cdma_reg_base + CDMA_CSR_INTRA_DIE_RW); ++ ++ return 0; ++} ++ ++int sophgo_dw_pcie_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct sophgo_dw_pcie *pcie; ++ struct dw_pcie_rp *pp; ++ struct resource_entry *win; ++ struct pci_host_bridge *bridge; ++ struct resource *res; ++ int ret = 0; ++ ++ dev_err(dev, "probe\n"); ++ ++ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); ++ if (!pcie) ++ return -ENOMEM; ++ ++ pcie->dev = dev; ++ ++ platform_set_drvdata(pdev, pcie); ++ ++ pp = &pcie->pp; ++ ++ raw_spin_lock_init(&pp->lock); ++ ++ ret = sophgo_dw_pcie_get_resources(pcie); ++ if (ret) ++ return ret; ++ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); ++ if (res) { ++ pp->cfg0_size = resource_size(res); ++ pp->cfg0_base = res->start; ++ ++ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); ++ if (IS_ERR(pp->va_cfg0_base)) ++ return PTR_ERR(pp->va_cfg0_base); ++ } else { ++ dev_err(dev, "Missing *config* reg space\n"); ++ return -ENODEV; ++ } ++ ++ if (pcie->pcie_card) { ++ dev_err(dev, "pcie card mode, begin init pcie bus\n"); ++ ret = sophgo_pcie_host_init_port(pcie); ++ if (ret) ++ return ret; ++ ++ if (pcie->c2c_pcie_rc) ++ sophgo_pcie_config_cdma_route(pcie); ++ } ++ ++ bridge = devm_pci_alloc_host_bridge(dev, 0); ++ if (!bridge) ++ return -ENOMEM; ++ ++ pp->bridge = bridge; ++ ++ /* Get the I/O range from DT */ ++ win = resource_list_first_type(&bridge->windows, IORESOURCE_IO); ++ if (win) { ++ pp->io_size = resource_size(win->res); ++ pp->io_bus_addr = win->res->start - win->offset; ++ pp->io_base = pci_pio_to_address(win->res->start); ++ } ++ ++ /* Set default bus ops */ ++ bridge->ops = &sophgo_dw_pcie_ops; ++ bridge->child_ops = &sophgo_dw_child_pcie_ops; ++ sophgo_dw_pcie_version_detect(pcie); ++ sophgo_dw_pcie_iatu_detect(pcie); ++ ret = sophgo_dw_pcie_setup_rc(pp); ++ if (ret) ++ return ret; ++ ++ ret = sophgo_dw_pcie_setup_irq(pcie); ++ if (ret) ++ dev_err(dev, "pcie intx interrupt request fail, ret = %d\n", ret); ++ ++ bridge->sysdata = pp; ++ bridge->dev.parent = dev; ++ bridge->ops = &sophgo_dw_pcie_ops; ++ bridge->map_irq = of_irq_parse_and_map_pci; ++ bridge->swizzle_irq = pci_common_swizzle; ++ ++ ret = pci_host_probe(bridge); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static const struct of_device_id sophgo_dw_pcie_of_match[] = { ++ { .compatible = "sophgo,sg2044-pcie-host", }, ++ { .compatible = "sophgo,bm1690-pcie-host", }, ++ {}, ++}; ++ ++static struct platform_driver sophgo_dw_pcie_driver = { ++ .driver = { ++ .name = "sophgo-dw-pcie", ++ .of_match_table = sophgo_dw_pcie_of_match, ++ .suppress_bind_attrs = true, ++ }, ++ .probe = sophgo_dw_pcie_probe, ++}; ++builtin_platform_driver(sophgo_dw_pcie_driver); ++ ++ ++#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) ++ ++static struct device *phb_dev; ++struct fwnode_handle *pci_host_bridge_acpi_get_fwnode(struct device *dev); ++ ++struct fwnode_handle *pci_host_bridge_acpi_get_fwnode(struct device *dev) ++{ ++ return 
acpi_fwnode_handle(to_acpi_device(phb_dev)); ++} ++ ++static int sophgo_pcie_init(struct pci_config_window *cfg) ++{ ++ struct device *dev = cfg->parent; ++ struct acpi_device *adev = to_acpi_device(dev); ++ struct acpi_pci_root *root = acpi_driver_data(adev); ++ struct sophgo_dw_pcie *pcie; ++ struct dw_pcie_rp *pp; ++ struct resource res[4]; ++ int ret = 0; ++ ++ struct list_head resource_list, *list; ++ struct resource_entry *win; ++ unsigned long flags; ++ ++ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); ++ if (!pcie) ++ return -ENOMEM; ++ ++ phb_dev = dev; ++ pcie->dev = dev; ++ pp = &pcie->pp; ++ ++ ret = acpi_get_rc_target_num_resources(dev, "SOPH0000", root->segment, res, 4); ++ ++ ret = sophgo_dw_pcie_get_resources_acpi(pcie, res); ++ if (ret) ++ return ret; ++ pp->cfg0_size = resource_size(&cfg->res); ++ pp->cfg0_base = cfg->res.start; ++ pp->va_cfg0_base = cfg->win; ++ ++ // Get the I/O range from ACPI Table ++ INIT_LIST_HEAD(&resource_list); ++ flags = IORESOURCE_IO; ++ ret = acpi_dev_get_resources(adev, &resource_list, acpi_dev_filter_resource_type_cb, (void *)flags); ++ if (ret < 0) { ++ pr_err("failed to parse _CRS method, error code %d\n", ret); ++ return ret; ++ } else if (ret == 0) { ++ pr_err("no IO and memory resources present in _CRS\n"); ++ return -EINVAL; ++ } ++ list = &resource_list; ++ ++ win = resource_list_first_type(list, IORESOURCE_IO); ++ if (win) { ++ pp->io_size = resource_size(win->res); ++ pp->io_bus_addr = win->res->start - win->offset; ++ pp->io_base = win->res->start; ++ } ++ ++ sophgo_dw_pcie_version_detect(pcie); ++ sophgo_dw_pcie_iatu_detect(pcie); ++ ret = sophgo_dw_pcie_setup_rc(pp); ++ ++ pci_msi_register_fwnode_provider(&pci_host_bridge_acpi_get_fwnode); ++ ++ cfg->priv = pp; ++ return 0; ++} ++ ++const struct pci_ecam_ops sophgo_pci_ecam_ops = { ++ .init = sophgo_pcie_init, ++ .pci_ops = { ++ .map_bus = sophgo_dw_pcie_own_conf_map_bus, ++ .read = sophgo_dw_pcie_rd_other_conf, ++ .write = sophgo_dw_pcie_wr_other_conf, ++ } ++}; ++ ++#endif +diff --git a/drivers/pci/controller/dwc/pcie-dw-sophgo.h b/drivers/pci/controller/dwc/pcie-dw-sophgo.h +new file mode 100644 +index 000000000000..000ac313bed6 +--- /dev/null ++++ b/drivers/pci/controller/dwc/pcie-dw-sophgo.h +@@ -0,0 +1,251 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Synopsys DesignWare PCIe host controller driver ++ * ++ * Copyright (C) 2013 Sophgo Co., Ltd. 
++ * https://www.sophgo.com ++ * ++ * Author: Lionel Li ++ */ ++ ++#ifndef _PCIE_DW_SOPHGO_H ++#define _PCIE_DW_SOPHGO_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "pcie-designware.h" ++ ++//PCIE CTRL REG ++#define PCIE_CTRL_INT_SIG_0_REG 0x048 ++#define PCIE_CTRL_SFT_RST_SIG_REG 0x050 ++#define PCIE_CTRL_REMAPPING_EN_REG 0x060 ++#define PCIE_CTRL_HNI_UP_START_ADDR_REG 0x064 ++#define PCIE_CTRL_HNI_UP_END_ADDR_REG 0x068 ++#define PCIE_CTRL_HNI_DW_ADDR_REG 0x06c ++#define PCIE_CTRL_SN_UP_START_ADDR_REG 0x070 ++#define PCIE_CTRL_SN_UP_END_ADDR_REG 0x074 ++#define PCIE_CTRL_SN_DW_ADDR_REG 0x078 ++#define PCIE_CTRL_AXI_MSI_GEN_CTRL_REG 0x07c ++#define PCIE_CTRL_AXI_MSI_GEN_LOWER_ADDR_REG 0x088 ++#define PCIE_CTRL_AXI_MSI_GEN_UPPER_ADDR_REG 0x08c ++#define PCIE_CTRL_AXI_MSI_GEN_USER_DATA_REG 0x090 ++#define PCIE_CTRL_AXI_MSI_GEN_MASK_IRQ_REG 0x094 ++#define PCIE_CTRL_IRQ_EN_REG 0x0a0 ++ ++#define PCIE_SII_GENERAL_CTRL1_REG 0x050 ++#define PCIE_SII_GENERAL_CTRL3_REG 0x058 ++ ++#define PCIE_CTRL_INT_SIG_0_PCIE_INTX_SHIFT_BIT 5 ++#define PCIE_CTRL_SFT_RST_SIG_COLD_RSTN_BIT 0 ++#define PCIE_CTRL_SFT_RST_SIG_PHY_RSTN_BIT 1 ++#define PCIE_CTRL_SFT_RST_SIG_WARM_RSTN_BIT 2 ++#define PCIE_CTRL_REMAP_EN_HNI_TO_PCIE_UP4G_EN_BIT 0 ++#define PCIE_CTRL_REMAP_EN_HNI_TO_PCIE_DW4G_EN_BIT 1 ++#define PCIE_CTRL_REMAP_EN_SN_TO_PCIE_UP4G_EN_BIT 2 ++#define PCIE_CTRL_REMAP_EN_SN_TO_PCIE_DW4G_EN_BIT 3 ++#define PCIE_CTRL_AXI_MSI_GEN_CTRL_MSI_GEN_EN_BIT 0 ++#define PCIE_CTRL_IRQ_EN_INTX_SHIFT_BIT 1 ++ ++#define CDMA_CSR_RCV_ADDR_H32 (0x1004) ++#define CDMA_CSR_RCV_ADDR_M16 (0x1008) ++#define CDMA_CSR_INTER_DIE_RW (0x100c) ++#define CDMA_CSR_4 (0x1010) ++#define CDMA_CSR_INTRA_DIE_RW (0x123c) ++ ++#define CDMA_CSR_RCV_CMD_OS 15 ++ ++// CDMA_CSR_INTER_DIE_RW ++#define CDMA_CSR_INTER_DIE_READ_ADDR_L4 0 ++#define CDMA_CSR_INTER_DIE_READ_ADDR_H4 4 ++#define CDMA_CSR_INTER_DIE_WRITE_ADDR_L4 8 ++#define CDMA_CSR_INTER_DIE_WRITE_ADDR_H4 12 ++ ++// CDMA_CSR_INTRA_DIE_RW ++#define CDMA_CSR_INTRA_DIE_READ_ADDR_L4 0 ++#define CDMA_CSR_INTRA_DIE_READ_ADDR_H4 4 ++#define CDMA_CSR_INTRA_DIE_WRITE_ADDR_L4 8 ++#define CDMA_CSR_INTRA_DIE_WRITE_ADDR_H4 12 ++ ++#define GENMASK_32(h, l) \ ++ (((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32UL - 1 - (h)))) ++ ++#define PCIE_SII_GENERAL_CTRL1_DEVICE_TYPE_MASK GENMASK_32(12, 9) ++#define PCIE_CTRL_AXI_MSI_GEN_CTRL_MSI_GEN_MULTI_MSI_MASK GENMASK_32(3, 1) ++ ++enum pcie_rst_status { ++ PCIE_RST_ASSERT = 0, ++ PCIE_RST_DE_ASSERT, ++ PCIE_RST_STATUS_BUTT ++}; ++ ++enum { ++ C2C_PCIE_X8_0 = 0b0101, ++ C2C_PCIE_X8_1 = 0b0111, ++ C2C_PCIE_X4_0 = 0b0100, ++ C2C_PCIE_X4_1 = 0b0110, ++ CXP_PCIE_X8 = 0b1010, ++ CXP_PCIE_X4 = 0b1011, ++}; ++ ++enum { ++ // RN: K2K; RNI: CCN ++ AXI_RNI = 0b1001, ++ AXI_RN = 0b1000, ++}; ++ ++struct PCIE_EQ_COEF { ++ uint32_t cursor; ++ uint32_t pre_cursor; ++ uint32_t post_cursor; ++}; ++ ++struct sophgo_dw_pcie { ++ struct device *dev; ++ void __iomem *dbi_base; ++ void __iomem *atu_base; ++ void __iomem *sii_reg_base; ++ void __iomem *ctrl_reg_base; ++ void __iomem *c2c_top; ++ void __iomem *cdma_reg_base; ++ uint64_t cfg_start_addr; ++ uint64_t cfg_end_addr; ++ uint64_t slv_start_addr; ++ uint64_t slv_end_addr; ++ uint64_t dw_start; ++ uint64_t dw_end; ++ uint64_t up_start_addr; ++ uint64_t cdma_pa_start; ++ uint64_t cdma_size; ++ uint32_t c2c_pcie_rc; ++ size_t atu_size; ++ uint32_t pcie_card; ++ uint32_t pcie_route_config; ++ u32 num_ib_windows; ++ u32 
num_ob_windows; ++ u32 region_align; ++ u64 region_limit; ++ int irq; ++ struct irq_domain *intx_domain; ++ struct dw_pcie_rp pp; ++ const struct dw_pcie_ops *ops; ++ u32 version; ++ u32 type; ++ unsigned long caps; ++ int num_lanes; ++ int link_gen; ++ u8 n_fts[2]; ++ struct dw_edma_chip edma; ++ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS]; ++ struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS]; ++ struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS]; ++ struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS]; ++ int pe_rst; ++ struct phy *phy; ++}; ++ ++#define to_sophgo_dw_pcie_from_pp(port) container_of((port), struct sophgo_dw_pcie, pp) ++ ++extern struct irq_domain *sophgo_get_msi_irq_domain(void); ++u32 sophgo_dw_pcie_read_ctrl(struct sophgo_dw_pcie *pcie, u32 reg, size_t size); ++void sophgo_dw_pcie_write_ctrl(struct sophgo_dw_pcie *pcie, u32 reg, size_t size, u32 val); ++u32 sophgo_dw_pcie_read_dbi(struct sophgo_dw_pcie *pcie, u32 reg, size_t size); ++void sophgo_dw_pcie_write_dbi(struct sophgo_dw_pcie *pcie, u32 reg, size_t size, u32 val); ++u32 sophgo_dw_pcie_readl_atu(struct sophgo_dw_pcie *pcie, u32 dir, u32 index, u32 reg); ++void sophgo_dw_pcie_writel_atu(struct sophgo_dw_pcie *pcie, u32 dir, u32 index, u32 reg, u32 val); ++ ++static inline void sophgo_dw_pcie_writel_dbi(struct sophgo_dw_pcie *pcie, u32 reg, u32 val) ++{ ++ sophgo_dw_pcie_write_dbi(pcie, reg, 0x4, val); ++} ++ ++static inline u32 sophgo_dw_pcie_readl_dbi(struct sophgo_dw_pcie *pcie, u32 reg) ++{ ++ return sophgo_dw_pcie_read_dbi(pcie, reg, 0x4); ++} ++ ++static inline void sophgo_dw_pcie_writew_dbi(struct sophgo_dw_pcie *pcie, u32 reg, u16 val) ++{ ++ sophgo_dw_pcie_write_dbi(pcie, reg, 0x2, val); ++} ++ ++static inline u16 sophgo_dw_pcie_readw_dbi(struct sophgo_dw_pcie *pcie, u32 reg) ++{ ++ return sophgo_dw_pcie_read_dbi(pcie, reg, 0x2); ++} ++ ++static inline void sophgo_dw_pcie_writeb_dbi(struct sophgo_dw_pcie *pcie, u32 reg, u8 val) ++{ ++ sophgo_dw_pcie_write_dbi(pcie, reg, 0x1, val); ++} ++ ++static inline u8 sophgo_dw_pcie_readb_dbi(struct sophgo_dw_pcie *pcie, u32 reg) ++{ ++ return sophgo_dw_pcie_read_dbi(pcie, reg, 0x1); ++} ++ ++static inline void __iomem *sophgo_dw_pcie_select_atu(struct sophgo_dw_pcie *pcie, u32 dir, ++ u32 index) ++{ ++ if (dw_pcie_cap_is(pcie, IATU_UNROLL)) ++ return pcie->atu_base + PCIE_ATU_UNROLL_BASE(dir, index); ++ ++ sophgo_dw_pcie_writel_dbi(pcie, PCIE_ATU_VIEWPORT, dir | index); ++ return pcie->atu_base; ++} ++ ++static inline u32 sophgo_dw_pcie_readl_atu_ib(struct sophgo_dw_pcie *pcie, u32 index, u32 reg) ++{ ++ return sophgo_dw_pcie_readl_atu(pcie, PCIE_ATU_REGION_DIR_IB, index, reg); ++} ++ ++static inline void sophgo_dw_pcie_writel_atu_ib(struct sophgo_dw_pcie *pcie, u32 index, u32 reg, ++ u32 val) ++{ ++ sophgo_dw_pcie_writel_atu(pcie, PCIE_ATU_REGION_DIR_IB, index, reg, val); ++} ++ ++static inline u32 sophgo_dw_pcie_readl_atu_ob(struct sophgo_dw_pcie *pcie, u32 index, u32 reg) ++{ ++ return sophgo_dw_pcie_readl_atu(pcie, PCIE_ATU_REGION_DIR_OB, index, reg); ++} ++ ++static inline void sophgo_dw_pcie_writel_atu_ob(struct sophgo_dw_pcie *pcie, u32 index, u32 reg, ++ u32 val) ++{ ++ sophgo_dw_pcie_writel_atu(pcie, PCIE_ATU_REGION_DIR_OB, index, reg, val); ++} ++static inline void sophgo_dw_pcie_dbi_ro_wr_en(struct sophgo_dw_pcie *pci) ++{ ++ u32 reg = 0; ++ u32 val = 0; ++ ++ reg = PCIE_MISC_CONTROL_1_OFF; ++ val = sophgo_dw_pcie_readl_dbi(pci, reg); ++ val |= PCIE_DBI_RO_WR_EN; ++ sophgo_dw_pcie_writel_dbi(pci, reg, val); ++} ++ 
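++/*
++ * sophgo_dw_pcie_dbi_ro_wr_en() above and sophgo_dw_pcie_dbi_ro_wr_dis()
++ * below bracket writes to read-only DBI registers via the
++ * PCIE_DBI_RO_WR_EN bit in PCIE_MISC_CONTROL_1_OFF. A minimal usage
++ * sketch (illustrative only; the register and value written here are
++ * placeholders, not taken from this driver):
++ *
++ *	sophgo_dw_pcie_dbi_ro_wr_en(pcie);
++ *	sophgo_dw_pcie_writel_dbi(pcie, PCI_CLASS_REVISION, class_val);
++ *	sophgo_dw_pcie_dbi_ro_wr_dis(pcie);
++ */
++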
++static inline void sophgo_dw_pcie_dbi_ro_wr_dis(struct sophgo_dw_pcie *pci) ++{ ++ u32 reg = 0; ++ u32 val = 0; ++ ++ reg = PCIE_MISC_CONTROL_1_OFF; ++ val = sophgo_dw_pcie_readl_dbi(pci, reg); ++ val &= ~PCIE_DBI_RO_WR_EN; ++ sophgo_dw_pcie_writel_dbi(pci, reg, val); ++} + -+extern struct irq_domain *cdns_pcie_get_parent_irq_domain(int intc_id); -+int check_vendor_id(struct pci_dev *dev, struct vendor_id_list vendor_id_list[], -+ size_t vendor_id_list_num); +#endif +diff --git a/drivers/pci/controller/dwc/pcie-ultrarisc.c b/drivers/pci/controller/dwc/pcie-ultrarisc.c +new file mode 100644 +index 000000000000..0be9d5d97f01 +--- /dev/null ++++ b/drivers/pci/controller/dwc/pcie-ultrarisc.c +@@ -0,0 +1,139 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * DWC PCIe RC driver for UltraRISC DP1000 SoC ++ * ++ * Copyright (C) 2023 UltraRISC ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pcie-designware.h" ++ ++#define PCIE_CUS_CORE 0x400000 ++ ++#define LTSSM_ENABLE (1 << 7) ++#define FAST_LINK_MODE (1 << 12) ++#define HOLD_PHY_RST (1 << 14) ++#define L1SUB_DISABLE (1 << 15) ++ ++struct ultrarisc_pcie { ++ struct dw_pcie *pci; ++}; ++ ++static const struct of_device_id ultrarisc_pcie_of_match[]; ++ ++static const struct dw_pcie_host_ops ultrarisc_pcie_host_ops = { ++}; ++ ++static int ultrarisc_pcie_establish_link(struct dw_pcie *pci) ++{ ++ u32 val; ++ u8 cap_exp; ++ ++ val = dw_pcie_readl_dbi(pci, PCIE_CUS_CORE); ++ val &= ~FAST_LINK_MODE; ++ dw_pcie_writel_dbi(pci, PCIE_CUS_CORE, val); ++ ++ val = dw_pcie_readl_dbi(pci, PCIE_TIMER_CTRL_MAX_FUNC_NUM); ++ val &= ~PORT_FLT_SF_MASK; ++ val |= PORT_FLT_SF_64; ++ dw_pcie_writel_dbi(pci, PCIE_TIMER_CTRL_MAX_FUNC_NUM, val); ++ ++ cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); ++ val = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCTL2); ++ val &= ~PCI_EXP_LNKCTL2_TLS; ++ val |= PCI_EXP_LNKCTL2_TLS_16_0GT; ++ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCTL2, val); ++ ++ val = dw_pcie_readl_dbi(pci, PCIE_PORT_FORCE); ++ val &= ~PORT_LINK_NUM_MASK; ++ dw_pcie_writel_dbi(pci, PCIE_PORT_FORCE, val); ++ ++ val = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_DEVCTL2); ++ val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT; ++ val |= 0x6; ++ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_DEVCTL2, val); ++ ++ val = dw_pcie_readl_dbi(pci, PCIE_CUS_CORE); ++ val &= ~(HOLD_PHY_RST | L1SUB_DISABLE); ++ val |= LTSSM_ENABLE; ++ dw_pcie_writel_dbi(pci, PCIE_CUS_CORE, val); ++ ++ return 0; ++} ++ ++static const struct dw_pcie_ops ultrarisc_pcie_ops = { ++ .start_link = ultrarisc_pcie_establish_link, ++}; ++ ++static int ultrarisc_pcie_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct ultrarisc_pcie *ultrarisc_pcie; ++ struct dw_pcie *pci; ++ struct dw_pcie_rp *pp; ++ int ret; ++ ++ ultrarisc_pcie = devm_kzalloc(dev, sizeof(*ultrarisc_pcie), GFP_KERNEL); ++ if (!ultrarisc_pcie) ++ return -ENOMEM; ++ ++ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); ++ if (!pci) ++ return -ENOMEM; ++ ++ pci->dev = dev; ++ pci->ops = &ultrarisc_pcie_ops; ++ ++ /* Set a default value suitable for at most 16 in and 16 out windows */ ++ pci->atu_size = SZ_8K; ++ ++ ultrarisc_pcie->pci = pci; ++ ++ pp = &pci->pp; ++ ++ platform_set_drvdata(pdev, ultrarisc_pcie); ++ ++ pp->irq = platform_get_irq(pdev, 1); ++ if (pp->irq < 0) ++ return pp->irq; ++ ++ pp->num_vectors = MAX_MSI_IRQS; ++ pp->ops = &ultrarisc_pcie_host_ops; ++ ++ ret = dw_pcie_host_init(pp); ++ if (ret) 
{ ++ dev_err(dev, "Failed to initialize host\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id ultrarisc_pcie_of_match[] = { ++ { ++ .compatible = "ultrarisc,dw-pcie", ++ }, ++ {}, ++}; ++ ++static struct platform_driver ultrarisc_pcie_driver = { ++ .driver = { ++ .name = "ultrarisc-pcie", ++ .of_match_table = ultrarisc_pcie_of_match, ++ .suppress_bind_attrs = true, ++ }, ++ .probe = ultrarisc_pcie_probe, ++}; ++builtin_platform_driver(ultrarisc_pcie_driver); diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c -index 4eea161663b1..1222da5cdc89 100644 +index 4eea161663b1..6a36d630e101 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c -@@ -12,6 +12,7 @@ +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include #include "../pci.h" #include "msi.h" -+#include "../controller/cadence/pcie-cadence-sophgo.h" - - #ifdef CONFIG_HISI_VIRTCCA_CODA +@@ -17,6 +18,34 @@ #include -@@ -850,66 +851,70 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int - } #endif -- if (maxvec < minvec) -- return -ERANGE; -+ if (check_vendor_id(dev, vendor_id_list, vendor_id_list_num)) { -+ if (maxvec < minvec) -+ return -ERANGE; - -- if (dev->msi_enabled) { -- pci_info(dev, "can't enable MSI-X (MSI already enabled)\n"); -- return -EINVAL; -- } -+ if (dev->msi_enabled) { -+ pci_info(dev, "can't enable MSI-X (MSI already enabled)\n"); -+ return -EINVAL; -+ } - -- if (WARN_ON_ONCE(dev->msix_enabled)) -- return -EINVAL; -+ if (WARN_ON_ONCE(dev->msix_enabled)) -+ return -EINVAL; - -- /* Check MSI-X early on irq domain enabled architectures */ -- if (!pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX, ALLOW_LEGACY)) -- return -ENOTSUPP; -+ /* Check MSI-X early on irq domain enabled architectures */ -+ if (!pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX, ALLOW_LEGACY)) -+ return -ENOTSUPP; - -- if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0) -- return -EINVAL; -+ if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0) -+ return -EINVAL; ++struct pci_msix_whitelist { ++ const char *device_name; ++ unsigned short vendor; ++ unsigned short device; ++}; ++ ++struct platform_msix_quirks { ++ const char *machine; ++ struct pci_msix_whitelist *list; ++ int count; ++}; ++ ++static struct pci_msix_whitelist mango_msix_whitelist[] = { ++ {"Inter X520", 0x8086, 0x10fb}, ++ {"Inter I40E", 0x8086, 0x1572}, ++ {"Sophgo sc11", 0x1f1c, 0x1690}, ++ {"Switchtec", 0x11f8, 0x4052}, ++ {"Mellanox ConnectX-2", 0x15b3, 0x6750} ++}; ++ ++static struct platform_msix_quirks quirks_table[] = { ++ { ++ .machine = "sophgo,mango", ++ .list = mango_msix_whitelist, ++ .count = ARRAY_SIZE(mango_msix_whitelist), ++ } ++}; ++ + int pci_msi_enable = 1; + int pci_msi_ignore_mask; -- hwsize = pci_msix_vec_count(dev); -- if (hwsize < 0) -- return hwsize; -+ hwsize = pci_msix_vec_count(dev); -+ if (hwsize < 0) -+ return hwsize; +@@ -836,10 +865,41 @@ static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *en + return true; + } -- if (!pci_msix_validate_entries(dev, entries, nvec)) -- return -EINVAL; -+ if (!pci_msix_validate_entries(dev, entries, nvec)) ++static struct platform_msix_quirks *platform_has_msix_quirks(void) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(quirks_table); i++) { ++ if (of_machine_is_compatible(quirks_table[i].machine)) ++ return &quirks_table[i]; ++ } ++ ++ return NULL; ++} ++ ++static bool dev_is_in_msix_whitelist(struct pci_dev *dev, struct pci_msix_whitelist *list, int count) ++{ ++ int i; ++ ++ for (i = 
0; i < count; i++) { ++ if ((dev->vendor == list[i].vendor) && (dev->device == list[i].device)) ++ return true; ++ } ++ ++ return false; ++} ++ + int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, + int maxvec, struct irq_affinity *affd, int flags) + { + int hwsize, rc, nvec = maxvec; ++ struct platform_msix_quirks *quirks; ++ ++ quirks = platform_has_msix_quirks(); ++ if (quirks) { ++ if (!dev_is_in_msix_whitelist(dev, quirks->list, quirks->count)) + return -EINVAL; ++ } -- if (hwsize < nvec) { -- /* Keep the IRQ virtual hackery working */ -- if (flags & PCI_IRQ_VIRTUAL) -- hwsize = nvec; -- else -- nvec = hwsize; -- } -+ if (hwsize < nvec) { -+ /* Keep the IRQ virtual hackery working */ -+ if (flags & PCI_IRQ_VIRTUAL) -+ hwsize = nvec; -+ else -+ nvec = hwsize; -+ } + #ifdef CONFIG_LOONGARCH + if (!disable_pci_irq_limit) { +@@ -849,7 +909,6 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int + } + } + #endif +- + if (maxvec < minvec) + return -ERANGE; -- if (nvec < minvec) -- return -ENOSPC; -+ if (nvec < minvec) -+ return -ENOSPC; +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c +index 05b7357bd258..cc6cac9395c8 100644 +--- a/drivers/pci/pci-acpi.c ++++ b/drivers/pci/pci-acpi.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -29,7 +30,7 @@ const guid_t pci_acpi_dsm_guid = + GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a, + 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d); -- rc = pci_setup_msi_context(dev); -- if (rc) -- return rc; -+ rc = pci_setup_msi_context(dev); -+ if (rc) -+ return rc; +-#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) ++#if defined(CONFIG_PCI_QUIRKS) && (defined(CONFIG_ARM64) || defined(CONFIG_RISCV)) + static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res) + { + struct device *dev = &adev->dev; +@@ -60,6 +61,41 @@ static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res) + return 0; + } -- if (!pci_setup_msix_device_domain(dev, hwsize)) -- return -ENODEV; -+ if (!pci_setup_msix_device_domain(dev, hwsize)) -+ return -ENODEV; ++static int acpi_get_rc_addrs(struct acpi_device *adev, struct resource *res_array, int max_resources) ++{ ++ struct device *dev = &adev->dev; ++ struct resource_entry *entry; ++ struct list_head list; ++ unsigned long flags; ++ int ret, count = 0; ++ ++ INIT_LIST_HEAD(&list); ++ flags = IORESOURCE_MEM; ++ ret = acpi_dev_get_resources(adev, &list, ++ acpi_dev_filter_resource_type_cb, ++ (void *) flags); ++ if (ret < 0) { ++ dev_err(dev, "failed to parse _CRS method, error code %d\n", ret); ++ return ret; ++ } ++ ++ if (ret == 0) { ++ dev_err(dev, "no IO and memory resources present in _CRS\n"); ++ return -EINVAL; ++ } ++ ++ list_for_each_entry(entry, &list, node) { ++ if (count >= max_resources) ++ break; ++ res_array[count++] = *entry->res; ++ } ++ ++ acpi_dev_free_resource_list(&list); ++ ++ return count; ++} ++ ++ + static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context, + void **retval) + { +@@ -103,6 +139,35 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, -- for (;;) { -- if (affd) { -- nvec = irq_calc_affinity_vectors(minvec, nvec, affd); -- if (nvec < minvec) -- return -ENOSPC; -- } -+ for (;;) { -+ if (affd) { -+ nvec = irq_calc_affinity_vectors(minvec, nvec, affd); -+ if (nvec < minvec) -+ return -ENOSPC; -+ } - -- rc = msix_capability_init(dev, entries, nvec, affd); -- if (rc == 0) -- return nvec; -+ rc = 
msix_capability_init(dev, entries, nvec, affd); -+ if (rc == 0) -+ return nvec; - -- if (rc < 0) -- return rc; -- if (rc < minvec) -- return -ENOSPC; -+ if (rc < 0) -+ return rc; -+ if (rc < minvec) -+ return -ENOSPC; + return 0; + } ++ ++int acpi_get_rc_target_num_resources(struct device *dev, const char *hid, u16 segment, ++ struct resource *res_array, int max_resources) ++{ ++ struct acpi_device *adev; ++ acpi_status status; ++ acpi_handle handle; ++ int ret; ++ ++ status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle); ++ if (ACPI_FAILURE(status)) { ++ dev_err(dev, "can't find _HID %s device to locate resources\n", ++ hid); ++ return -ENODEV; ++ } ++ ++ adev = acpi_fetch_acpi_dev(handle); ++ if (!adev) ++ return -ENODEV; ++ ++ ret = acpi_get_rc_addrs(adev, res_array, max_resources); ++ if (ret < 0) { ++ dev_err(dev, "can't get resources from %s, error code %d\n", dev_name(&adev->dev), ret); ++ return ret; ++ } ++ ++ return ret; ++} ++ + #endif -- nvec = rc; -+ nvec = rc; -+ } -+ } else { -+ return -1; - } + phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) +@@ -1518,3 +1583,184 @@ static int __init acpi_pci_init(void) + return 0; } + arch_initcall(acpi_pci_init); ++ ++#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) ++ ++/* ++ * Try to assign the IRQ number when probing a new device ++ */ ++int pcibios_alloc_irq(struct pci_dev *dev) ++{ ++ if (!acpi_disabled) ++ acpi_pci_irq_enable(dev); ++ ++ return 0; ++} ++ ++struct acpi_pci_generic_root_info { ++ struct acpi_pci_root_info common; ++ struct pci_config_window *cfg; /* config space mapping */ ++}; ++ ++int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) ++{ ++ struct pci_config_window *cfg = bus->sysdata; ++ struct acpi_device *adev = to_acpi_device(cfg->parent); ++ struct acpi_pci_root *root = acpi_driver_data(adev); ++ ++ return root->segment; ++} ++ ++int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) ++{ ++ struct pci_config_window *cfg; ++ struct acpi_device *adev; ++ struct device *bus_dev; ++ ++ if (acpi_disabled) ++ return 0; ++ ++ cfg = bridge->bus->sysdata; ++ ++ /* ++ * On Hyper-V there is no corresponding ACPI device for a root bridge, ++ * therefore ->parent is set as NULL by the driver. And set 'adev' as ++ * NULL in this case because there is no proper ACPI device. ++ */ ++ if (!cfg->parent) ++ adev = NULL; ++ else ++ adev = to_acpi_device(cfg->parent); ++ ++ bus_dev = &bridge->bus->dev; ++ ++ ACPI_COMPANION_SET(&bridge->dev, adev); ++ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); ++ ++ return 0; ++} ++ ++static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) ++{ ++ struct resource_entry *entry, *tmp; ++ int status; ++ ++ status = acpi_pci_probe_root_resources(ci); ++ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { ++ if (!(entry->res->flags & IORESOURCE_WINDOW)) ++ resource_list_destroy_entry(entry); ++ } ++ return status; ++} ++ ++/* ++ * Lookup the bus range for the domain in MCFG, and set up config space ++ * mapping. 
++ */ ++static struct pci_config_window * ++pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) ++{ ++ struct device *dev = &root->device->dev; ++ struct resource *bus_res = &root->secondary; ++ u16 seg = root->segment; ++ const struct pci_ecam_ops *ecam_ops; ++ struct resource cfgres; ++ struct acpi_device *adev; ++ struct pci_config_window *cfg; ++ int ret; ++ ++ ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); ++ if (ret) { ++ dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); ++ return NULL; ++ } ++ ++ adev = acpi_resource_consumer(&cfgres); ++ if (adev) ++ dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, ++ dev_name(&adev->dev)); ++ else ++ dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", ++ &cfgres); ++ ++ cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); ++ if (IS_ERR(cfg)) { ++ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, ++ PTR_ERR(cfg)); ++ return NULL; ++ } ++ ++ return cfg; ++} ++ ++/* release_info: free resources allocated by init_info */ ++static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) ++{ ++ struct acpi_pci_generic_root_info *ri; ++ ++ ri = container_of(ci, struct acpi_pci_generic_root_info, common); ++ pci_ecam_free(ri->cfg); ++ kfree(ci->ops); ++ kfree(ri); ++} ++ ++/* Interface called from ACPI code to setup PCI host controller */ ++struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) ++{ ++ struct acpi_pci_generic_root_info *ri; ++ struct pci_bus *bus, *child; ++ struct acpi_pci_root_ops *root_ops; ++ struct pci_host_bridge *host; ++ ++ ri = kzalloc(sizeof(*ri), GFP_KERNEL); ++ if (!ri) ++ return NULL; ++ ++ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); ++ if (!root_ops) { ++ kfree(ri); ++ return NULL; ++ } ++ ++ ri->cfg = pci_acpi_setup_ecam_mapping(root); ++ if (!ri->cfg) { ++ kfree(ri); ++ kfree(root_ops); ++ return NULL; ++ } ++ ++ root_ops->release_info = pci_acpi_generic_release_info; ++ root_ops->prepare_resources = pci_acpi_root_prepare_resources; ++ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; ++ bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); ++ if (!bus) ++ return NULL; ++ ++ /* If we must preserve the resource configuration, claim now */ ++ host = pci_find_host_bridge(bus); ++ if (host->preserve_config) ++ pci_bus_claim_resources(bus); ++ ++ /* ++ * Assign whatever was left unassigned. If we didn't claim above, ++ * this will reassign everything. 
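++ * (Resources are claimed above only when the host bridge has
++ * preserve_config set; otherwise nothing was claimed and everything is
++ * reassigned here.)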
++ */ ++ pci_assign_unassigned_root_bus_resources(bus); ++ ++ list_for_each_entry(child, &bus->children, node) ++ pcie_bus_configure_settings(child); ++ ++ return bus; ++} ++ ++void pcibios_add_bus(struct pci_bus *bus) ++{ ++ acpi_pci_add_bus(bus); ++} ++ ++void pcibios_remove_bus(struct pci_bus *bus) ++{ ++ acpi_pci_remove_bus(bus); ++} ++ ++#endif +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h +index f6869fc8f49b..e70fdb1bb006 100644 +--- a/drivers/pci/pci.h ++++ b/drivers/pci/pci.h +@@ -611,9 +611,11 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, bool probe) + } + #endif +-#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) ++#if defined(CONFIG_PCI_QUIRKS) && (defined(CONFIG_ARM64) || defined(CONFIG_RISCV)) + int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, + struct resource *res); ++int acpi_get_rc_target_num_resources(struct device *dev, const char *hid, u16 segment, ++ struct resource *res_array, int max_resources); + #else + static inline int acpi_get_rc_resources(struct device *dev, const char *hid, + u16 segment, struct resource *res) diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index 46fad0d813b2..560b3a236d84 100644 --- a/drivers/pci/pcie/portdrv.c @@ -444525,6 +485290,31 @@ index 46fad0d813b2..560b3a236d84 100644 /* * If the user specified "pcie_ports=native", use the PCIe services regardless +diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig +index f608c2e66235..dad83faccf44 100644 +--- a/drivers/perf/Kconfig ++++ b/drivers/perf/Kconfig +@@ -86,6 +86,20 @@ config RISCV_PMU_SBI + full perf feature support i.e. counter overflow, privilege mode + filtering, counter configuration. + ++config ANDES_CUSTOM_PMU ++ bool "Andes custom PMU support" ++ depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI ++ default y ++ help ++ The Andes cores implement the PMU overflow extension very ++ similar to the standard Sscofpmf and Smcntrpmf extension. ++ ++ This will patch the overflow and pending CSRs and handle the ++ non-standard behaviour via the regular SBI PMU driver and ++ interface. ++ ++ If you don't know what to do here, say "Y". 
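++
++	  As a rough sketch of the effect (see the drivers/perf/riscv_pmu_sbi.c
++	  changes in this patch): when the XAndesPMU vendor extension is
++	  present, the overflow-status read is patched at boot through the
++	  alternatives mechanism to use ANDES_CSR_SCOUNTEROF instead of
++	  CSR_SSCOUNTOVF, and the pending bit is cleared via ANDES_CSR_SLIP.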
++ + config ARM_PMU_ACPI + depends on ARM_PMU && ACPI + def_bool y diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 31e491e7f206..2946422539fb 100644 --- a/drivers/perf/arm_smmuv3_pmu.c @@ -444547,6 +485337,108 @@ index 31e491e7f206..2946422539fb 100644 if (ret) { dev_warn(dev, "failed to allocate MSIs\n"); return; +diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c +index 901da688ea3f..685938868681 100644 +--- a/drivers/perf/riscv_pmu_sbi.c ++++ b/drivers/perf/riscv_pmu_sbi.c +@@ -19,10 +19,36 @@ + #include + #include + #include ++#include + + #include + #include +-#include ++#include ++#include ++#include ++ ++#define ALT_SBI_PMU_OVERFLOW(__ovl) \ ++asm volatile(ALTERNATIVE_2( \ ++ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \ ++ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ ++ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ ++ CONFIG_ERRATA_THEAD_PMU, \ ++ "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \ ++ ANDES_VENDOR_ID, \ ++ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \ ++ CONFIG_ANDES_CUSTOM_PMU) \ ++ : "=r" (__ovl) : \ ++ : "memory") ++ ++#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \ ++asm volatile(ALTERNATIVE( \ ++ "csrc " __stringify(CSR_IP) ", %0\n\t", \ ++ "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \ ++ ANDES_VENDOR_ID, \ ++ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \ ++ CONFIG_ANDES_CUSTOM_PMU) \ ++ : : "r"(__irq_mask) \ ++ : "memory") + + #define SYSCTL_NO_USER_ACCESS 0 + #define SYSCTL_USER_ACCESS 1 +@@ -61,6 +87,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS; + static union sbi_pmu_ctr_info *pmu_ctr_list; + static bool riscv_pmu_use_irq; + static unsigned int riscv_pmu_irq_num; ++static unsigned int riscv_pmu_irq_mask; + static unsigned int riscv_pmu_irq; + + /* Cache the available counters in a bitmask */ +@@ -694,7 +721,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + + event = cpu_hw_evt->events[fidx]; + if (!event) { +- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); + return IRQ_NONE; + } + +@@ -708,7 +735,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + * Overflow interrupt pending bit should only be cleared after stopping + * all the counters to avoid any race condition. 
+ */ +- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); + + /* No overflow bit is set */ + if (!overflow) +@@ -780,8 +807,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) + + if (riscv_pmu_use_irq) { + cpu_hw_evt->irq = riscv_pmu_irq; +- csr_clear(CSR_IP, BIT(riscv_pmu_irq_num)); +- csr_set(CSR_IE, BIT(riscv_pmu_irq_num)); ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); + enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); + } + +@@ -792,7 +818,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) + { + if (riscv_pmu_use_irq) { + disable_percpu_irq(riscv_pmu_irq); +- csr_clear(CSR_IE, BIT(riscv_pmu_irq_num)); + } + + /* Disable all counters access for user mode now */ +@@ -816,8 +841,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde + riscv_cached_mimpid(0) == 0) { + riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU; + riscv_pmu_use_irq = true; ++ } else if (riscv_has_vendor_extension_unlikely(ANDES_VENDOR_ID, ++ RISCV_ISA_VENDOR_EXT_XANDESPMU) && ++ IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) { ++ riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI; ++ riscv_pmu_use_irq = true; + } + ++ riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG); ++ + if (!riscv_pmu_use_irq) + return -EOPNOTSUPP; + diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index e4502958fd62..50f729360df1 100644 --- a/drivers/phy/Kconfig @@ -445428,7 +486320,7 @@ index 000000000000..931c9d9124e9 +MODULE_DESCRIPTION("Synopsys DesignWare MIPI DPHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig -index 79753411b778..12e411726bcc 100644 +index 79753411b778..7dff1019211b 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -22,7 +22,7 @@ config PINCONF @@ -445440,7 +486332,28 @@ index 79753411b778..12e411726bcc 100644 select PINCONF config DEBUG_PINCTRL -@@ -469,6 +469,15 @@ config PINCTRL_TB10X +@@ -427,6 +427,20 @@ config PINCTRL_SINGLE + help + This selects the device tree based generic pinctrl driver. + ++config PINCTRL_SPACEMIT_P1 ++ tristate "Spacemit P1 PMIC pinctrl and GPIO Support" ++ depends on MFD_SPACEMIT_P1 ++ depends on OF ++ select PINMUX ++ select GENERIC_PINCONF ++ select GPIOLIB ++ help ++ P1 PMICs provides multiple GPIOs that can be muxed for different ++ functions. This driver bundles a pinctrl driver to select the function ++ muxing and a GPIO driver to handle the GPIO when the GPIO function is ++ selected. ++ Say Y to enable pinctrl and GPIO support for the P1 PMIC. ++ + config PINCTRL_ST + bool + depends on OF +@@ -469,6 +483,15 @@ config PINCTRL_TB10X depends on OF && ARC_PLAT_TB10X select GPIOLIB @@ -445456,11 +486369,42 @@ index 79753411b778..12e411726bcc 100644 config PINCTRL_ZYNQ bool "Pinctrl driver for Xilinx Zynq" depends on ARCH_ZYNQ +@@ -505,6 +528,17 @@ config PINCTRL_MLXBF3 + each pin. This driver can also be built as a module called + pinctrl-mlxbf3. + ++config PINCTRL_SPACEMIT_K1X ++ bool "Spacemit k1x pinctrl driver" ++ depends on SOC_SPACEMIT_K1X ++ depends on OF ++ depends on HAS_IOMEM ++ select GENERIC_PINCTRL_GROUPS ++ select GENERIC_PINMUX_FUNCTIONS ++ select GENERIC_PINCONF ++ help ++ This support pinctrl driver for Spacemit k1x SoC. 
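++	  The driver builds on the generic pinctrl group, pinmux and pinconf
++	  helpers selected above and takes the pin and function description
++	  from the device tree.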
++ + source "drivers/pinctrl/actions/Kconfig" + source "drivers/pinctrl/aspeed/Kconfig" + source "drivers/pinctrl/bcm/Kconfig" +@@ -534,5 +568,6 @@ source "drivers/pinctrl/ti/Kconfig" + source "drivers/pinctrl/uniphier/Kconfig" + source "drivers/pinctrl/visconti/Kconfig" + source "drivers/pinctrl/vt8500/Kconfig" ++source "drivers/pinctrl/ultrarisc/Kconfig" + + endif diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile -index 4275eca92488..f07a2ee92197 100644 +index 4275eca92488..6ad6de6a67ca 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile -@@ -48,6 +48,7 @@ obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o +@@ -44,10 +44,13 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o + obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o + obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o + obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o ++obj-$(CONFIG_PINCTRL_SPACEMIT_K1X) += pinctrl-spacemit-k1x.o ++obj-$(CONFIG_PINCTRL_SPACEMIT_P1) += pinctrl-spacemit-p1.o + obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o @@ -445468,7 +486412,7 @@ index 4275eca92488..f07a2ee92197 100644 obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o -@@ -75,6 +76,7 @@ obj-$(CONFIG_SOC_STARFIVE) += starfive/ +@@ -75,8 +78,10 @@ obj-$(CONFIG_SOC_STARFIVE) += starfive/ obj-$(CONFIG_PINCTRL_STM32) += stm32/ obj-y += sunplus/ obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/ @@ -445476,6 +486420,2753 @@ index 4275eca92488..f07a2ee92197 100644 obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/ + obj-$(CONFIG_PINCTRL_VISCONTI) += visconti/ + obj-$(CONFIG_ARCH_VT8500) += vt8500/ ++obj-$(CONFIG_ARCH_ULTRARISC) += ultrarisc/ +diff --git a/drivers/pinctrl/pinctrl-spacemit-k1x.c b/drivers/pinctrl/pinctrl-spacemit-k1x.c +new file mode 100644 +index 000000000000..feed95179f33 +--- /dev/null ++++ b/drivers/pinctrl/pinctrl-spacemit-k1x.c +@@ -0,0 +1,2101 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Generic device tree based pinctrl driver for one register per pin ++ * type pinmux controllers ++ * ++ * Copyright (C) 2012 Texas Instruments, Inc. ++ * Copyright (C) 2023 Spacemit Co., Ltd. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "core.h" ++#include "devicetree.h" ++#include "pinconf.h" ++#include "pinmux.h" ++ ++#define DRIVER_NAME "pinctrl-single" ++#define PCS_OFF_DISABLED ~0U ++ ++#define EDGE_CLEAR 6 ++#define EDGE_FALL_EN 5 ++#define EDGE_RISE_EN 4 ++ ++#define PCS_CONTEXT_LOSS_OFF BIT(3) ++#define PCS_QUIRK_SHARED_IRQ BIT(2) ++#define PCS_FEAT_IRQ BIT(1) ++#define PCS_FEAT_PINCONF BIT(0) ++ ++#define PCS_QUIRK_HAS_SHARED_IRQ(_pcs) (_pcs->flags & PCS_QUIRK_SHARED_IRQ) ++#define PCS_HAS_IRQ(_pcs) (_pcs->flags & PCS_FEAT_IRQ) ++#define PCS_HAS_PINCONF(_pcs) (_pcs->flags & PCS_FEAT_PINCONF) ++ ++/** ++ * struct pcs_func_vals - mux function register offset and value pair ++ * @reg: register virtual address ++ * @val: register value ++ * @mask: mask ++ */ ++struct pcs_func_vals { ++ void __iomem *reg; ++ unsigned int val; ++ unsigned int mask; ++}; ++ ++/** ++ * struct pcs_conf_vals - pinconf parameter, pinconf register offset ++ * and value, enable, disable, mask ++ * @param: config parameter ++ * @val: user input bits in the pinconf register ++ * @enable: enable bits in the pinconf register ++ * @disable: disable bits in the pinconf register ++ * @mask: mask bits in the register value ++ */ ++struct pcs_conf_vals { ++ enum pin_config_param param; ++ unsigned int val; ++ unsigned int enable; ++ unsigned int disable; ++ unsigned int mask; ++}; ++ ++/** ++ * struct pcs_conf_type - pinconf property name, pinconf param pair ++ * @name: property name in DTS file ++ * @param: config parameter ++ */ ++struct pcs_conf_type { ++ const char *name; ++ enum pin_config_param param; ++}; ++ ++/** ++ * struct pcs_function - pinctrl function ++ * @name: pinctrl function name ++ * @vals: register and vals array ++ * @nvals: number of entries in vals array ++ * @pgnames: array of pingroup names the function uses ++ * @npgnames: number of pingroup names the function uses ++ * @conf: array of pin configurations ++ * @nconfs: number of pin configurations available ++ * @node: list node ++ */ ++struct pcs_function { ++ const char *name; ++ struct pcs_func_vals *vals; ++ unsigned int nvals; ++ const char **pgnames; ++ int npgnames; ++ struct pcs_conf_vals *conf; ++ int nconfs; ++ struct list_head node; ++}; ++ ++/** ++ * struct pcs_gpiofunc_range - pin ranges with same mux value of gpio function ++ * @offset: offset base of pins ++ * @npins: number pins with the same mux value of gpio function ++ * @gpiofunc: mux value of gpio function ++ * @node: list node ++ */ ++struct pcs_gpiofunc_range { ++ unsigned int offset; ++ unsigned int npins; ++ unsigned int gpiofunc; ++ struct list_head node; ++}; ++ ++/** ++ * struct pcs_data - wrapper for data needed by pinctrl framework ++ * @pa: pindesc array ++ * @cur: index to current element ++ * ++ * REVISIT: We should be able to drop this eventually by adding ++ * support for registering pins individually in the pinctrl ++ * framework for those drivers that don't need a static array. 
++ */ ++struct pcs_data { ++ struct pinctrl_pin_desc *pa; ++ int cur; ++}; ++ ++/** ++ * struct pcs_soc_data - SoC specific settings ++ * @flags: initial SoC specific PCS_FEAT_xxx values ++ * @irq: optional interrupt for the controller ++ * @irq_enable_mask: optional SoC specific interrupt enable mask ++ * @irq_status_mask: optional SoC specific interrupt status mask ++ * @rearm: optional SoC specific wake-up rearm function ++ */ ++struct pcs_soc_data { ++ unsigned int flags; ++ int irq; ++ unsigned int irq_enable_mask; ++ unsigned int irq_status_mask; ++ void (*rearm)(void); ++}; ++ ++/** ++ * struct pcs_device - pinctrl device instance ++ * @res: resources ++ * @base: virtual address of the controller ++ * @saved_vals: saved values for the controller ++ * @size: size of the ioremapped area ++ * @dev: device entry ++ * @np: device tree node ++ * @pctl: pin controller device ++ * @flags: mask of PCS_FEAT_xxx values ++ * @missing_nr_pinctrl_cells: for legacy binding, may go away ++ * @socdata: soc specific data ++ * @lock: spinlock for register access ++ * @mutex: mutex protecting the lists ++ * @width: bits per mux register ++ * @fmask: function register mask ++ * @fshift: function register shift ++ * @foff: value to turn mux off ++ * @fmax: max number of functions in fmask ++ * @bits_per_mux: number of bits per mux ++ * @bits_per_pin: number of bits per pin ++ * @pins: physical pins on the SoC ++ * @gpiofuncs: list of gpio functions ++ * @irqs: list of interrupt registers ++ * @chip: chip container for this instance ++ * @domain: IRQ domain for this instance ++ * @desc: pin controller descriptor ++ * @read: register read function to use ++ * @write: register write function to use ++ */ ++struct pcs_device { ++ struct resource *res; ++ void __iomem *base; ++ struct resource *gedge_flag_res; ++ void __iomem *gedge_flag_base; ++ unsigned int gedge_flag_size; ++ struct resource gpio_res; ++ void __iomem *gpio_base; ++ unsigned int gpio_size; ++ void *saved_vals; ++ unsigned int size; ++ struct device *dev; ++ struct device_node *np; ++ struct pinctrl_dev *pctl; ++ unsigned int flags; ++ struct property *missing_nr_pinctrl_cells; ++ struct pcs_soc_data socdata; ++ raw_spinlock_t lock; ++ struct mutex mutex; ++ unsigned int width; ++ unsigned int fmask; ++ unsigned int fshift; ++ unsigned int foff; ++ unsigned int fmax; ++ bool bits_per_mux; ++ unsigned int bits_per_pin; ++ struct pcs_data pins; ++ struct list_head gpiofuncs; ++ struct list_head irqs; ++ struct irq_chip chip; ++ struct irq_domain *domain; ++ struct pinctrl_desc desc; ++ unsigned int (*read)(void __iomem *reg); ++ void (*write)(unsigned int val, void __iomem *reg); ++}; ++ ++static int pcs_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, ++ unsigned long *config); ++static int pcs_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, ++ unsigned long *configs, unsigned int num_configs); ++ ++static enum pin_config_param pcs_bias[] = { ++ PIN_CONFIG_BIAS_PULL_DOWN, ++ PIN_CONFIG_BIAS_PULL_UP, ++}; ++ ++/* ++ * This lock class tells lockdep that irqchip core that this single ++ * pinctrl can be in a different category than its parents, so it won't ++ * report false recursion. ++ */ ++static struct lock_class_key pcs_lock_class; ++ ++/* Class for the IRQ request mutex */ ++static struct lock_class_key pcs_request_class; ++ ++/* ++ * REVISIT: Reads and writes could eventually use regmap or something ++ * generic. 
But at least on omaps, some mux registers are performance ++ * critical as they may need to be remuxed every time before and after ++ * idle. Adding tests for register access width for every read and ++ * write like regmap is doing is not desired, and caching the registers ++ * does not help in this case. ++ */ ++ ++static unsigned int __maybe_unused pcs_readb(void __iomem *reg) ++{ ++ return readb(reg); ++} ++ ++static unsigned int __maybe_unused pcs_readw(void __iomem *reg) ++{ ++ return readw(reg); ++} ++ ++static unsigned int __maybe_unused pcs_readl(void __iomem *reg) ++{ ++ return readl(reg); ++} ++ ++static void __maybe_unused pcs_writeb(unsigned int val, void __iomem *reg) ++{ ++ writeb(val, reg); ++} ++ ++static void __maybe_unused pcs_writew(unsigned int val, void __iomem *reg) ++{ ++ writew(val, reg); ++} ++ ++static void __maybe_unused pcs_writel(unsigned int val, void __iomem *reg) ++{ ++ writel(val, reg); ++} ++ ++static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs, ++ unsigned int pin) ++{ ++ unsigned int pin_offset_bytes; ++ unsigned int mux_bytes = pcs->width / BITS_PER_BYTE; ++ ++ if (pcs->bits_per_mux) { ++ pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE; ++ return (pin_offset_bytes / mux_bytes) * mux_bytes; ++ } ++ ++ return pin * mux_bytes; ++} ++ ++static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs, ++ unsigned int pin) ++{ ++ return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin; ++} ++ ++static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev, ++ struct seq_file *s, ++ unsigned int pin) ++{ ++ struct pcs_device *pcs; ++ unsigned int val; ++ unsigned long offset; ++ size_t pa; ++ ++ pcs = pinctrl_dev_get_drvdata(pctldev); ++ ++ offset = pcs_pin_reg_offset_get(pcs, pin); ++ val = pcs->read(pcs->base + offset); ++ ++ if (pcs->bits_per_mux) ++ val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin); ++ ++ pa = pcs->res->start + offset; ++ ++ seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME); ++} ++ ++static void pcs_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, ++ unsigned int num_maps) ++{ ++ struct pcs_device *pcs; ++ ++ pcs = pinctrl_dev_get_drvdata(pctldev); ++ devm_kfree(pcs->dev, map); ++} ++ ++static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, ++ struct pinctrl_map **map, unsigned int *num_maps); ++ ++static const struct pinctrl_ops pcs_pinctrl_ops = { ++ .get_groups_count = pinctrl_generic_get_group_count, ++ .get_group_name = pinctrl_generic_get_group_name, ++ .get_group_pins = pinctrl_generic_get_group_pins, ++ .pin_dbg_show = pcs_pin_dbg_show, ++ .dt_node_to_map = pcs_dt_node_to_map, ++ .dt_free_map = pcs_dt_free_map, ++}; ++ ++static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned int pin, ++ struct pcs_function **func) ++{ ++ unsigned int fselector; ++ struct function_desc *function; ++ const struct pinctrl_setting_mux *setting; ++ struct pin_desc *pdesc = pin_desc_get(pctldev, pin); ++ struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); ++ ++ /* If pin is not described in DTS & enabled, mux_setting is NULL. 
*/ ++ setting = pdesc->mux_setting; ++ if (!setting) ++ return -EOPNOTSUPP; ++ fselector = setting->func; ++ function = pinmux_generic_get_function(pctldev, fselector); ++ if (!function) ++ return -EINVAL; ++ *func = function->data; ++ if (!(*func)) { ++ dev_err(pcs->dev, "%s could not find function%i\n", ++ __func__, fselector); ++ return -EOPNOTSUPP; ++ } ++ return 0; ++} ++ ++static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned int fselector, unsigned int group) ++{ ++ int i; ++ unsigned long flags; ++ unsigned int val, mask; ++ struct pcs_device *pcs; ++ struct pcs_function *func; ++ struct pcs_func_vals *vals; ++ struct function_desc *function; ++ ++ pcs = pinctrl_dev_get_drvdata(pctldev); ++ /* If function mask is null, needn't enable it. */ ++ if (!pcs->fmask) ++ return 0; ++ function = pinmux_generic_get_function(pctldev, fselector); ++ if (!function) ++ return -EINVAL; ++ func = function->data; ++ if (!func) ++ return -EINVAL; ++ ++ dev_dbg(pcs->dev, "enabling %s function%i\n", ++ func->name, fselector); ++ ++ for (i = 0; i < func->nvals; i++) { ++ vals = &func->vals[i]; ++ raw_spin_lock_irqsave(&pcs->lock, flags); ++ val = pcs->read(vals->reg); ++ ++ if (pcs->bits_per_mux) ++ mask = vals->mask; ++ else ++ mask = pcs->fmask; ++ ++ val &= ~mask; ++ val |= (vals->val & mask); ++ pcs->write(val, vals->reg); ++ raw_spin_unlock_irqrestore(&pcs->lock, flags); ++ } ++ ++ return 0; ++} ++ ++static int pcs_request_gpio(struct pinctrl_dev *pctldev, ++ struct pinctrl_gpio_range *range, unsigned int pin) ++{ ++ u32 offset; ++ int pin_shift; ++ unsigned int data; ++ struct list_head *pos, *tmp; ++ struct pcs_gpiofunc_range *frange = NULL; ++ struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); ++ ++ /* If function mask is null, return directly. */ ++ if (!pcs->fmask) ++ return -EOPNOTSUPP; ++ ++ list_for_each_safe(pos, tmp, &pcs->gpiofuncs) { ++ frange = list_entry(pos, struct pcs_gpiofunc_range, node); ++ if (pin >= frange->offset + frange->npins || ++ pin < frange->offset) ++ continue; ++ ++ offset = pcs_pin_reg_offset_get(pcs, pin); ++ ++ if (pcs->bits_per_mux) { ++ pin_shift = pcs_pin_shift_reg_get(pcs, pin); ++ data = pcs->read(pcs->base + offset); ++ data &= ~(pcs->fmask << pin_shift); ++ data |= frange->gpiofunc << pin_shift; ++ pcs->write(data, pcs->base + offset); ++ } else { ++ data = pcs->read(pcs->base + offset); ++ data &= ~pcs->fmask; ++ data |= frange->gpiofunc; ++ pcs->write(data, pcs->base + offset); ++ } ++ break; ++ } ++ return 0; ++} ++ ++static const struct pinmux_ops pcs_pinmux_ops = { ++ .get_functions_count = pinmux_generic_get_function_count, ++ .get_function_name = pinmux_generic_get_function_name, ++ .get_function_groups = pinmux_generic_get_function_groups, ++ .set_mux = pcs_set_mux, ++ .gpio_request_enable = pcs_request_gpio, ++}; ++ ++/* Clear BIAS value */ ++static void pcs_pinconf_clear_bias(struct pinctrl_dev *pctldev, unsigned int pin) ++{ ++ int i; ++ unsigned long config; ++ ++ for (i = 0; i < ARRAY_SIZE(pcs_bias); i++) { ++ config = pinconf_to_config_packed(pcs_bias[i], 0); ++ pcs_pinconf_set(pctldev, pin, &config, 1); ++ } ++} ++ ++/* ++ * Check whether PIN_CONFIG_BIAS_DISABLE is valid. ++ * It's depend on that PULL_DOWN & PULL_UP configs are all invalid. 
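++ * In other words, BIAS_DISABLE is only reported as valid when neither the
++ * pull-up nor the pull-down configuration reads back as enabled.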
++ */ ++static bool pcs_pinconf_bias_disable(struct pinctrl_dev *pctldev, unsigned int pin) ++{ ++ int i; ++ unsigned long config; ++ ++ for (i = 0; i < ARRAY_SIZE(pcs_bias); i++) { ++ config = pinconf_to_config_packed(pcs_bias[i], 0); ++ if (!pcs_pinconf_get(pctldev, pin, &config)) ++ goto out; ++ } ++ return true; ++out: ++ return false; ++} ++ ++static int pcs_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config) ++{ ++ struct pcs_function *func; ++ enum pin_config_param param; ++ unsigned int offset = 0, data = 0, i, j, ret; ++ struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); ++ ++ ret = pcs_get_function(pctldev, pin, &func); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < func->nconfs; i++) { ++ param = pinconf_to_config_param(*config); ++ if (param == PIN_CONFIG_BIAS_DISABLE) { ++ if (pcs_pinconf_bias_disable(pctldev, pin)) { ++ *config = 0; ++ return 0; ++ } else { ++ return -EOPNOTSUPP; ++ } ++ } else if (param != func->conf[i].param) { ++ continue; ++ } ++ ++ offset = pin * (pcs->width / BITS_PER_BYTE); ++ data = pcs->read(pcs->base + offset) & func->conf[i].mask; ++ switch (func->conf[i].param) { ++ /* 4 parameters */ ++ case PIN_CONFIG_BIAS_PULL_DOWN: ++ case PIN_CONFIG_BIAS_PULL_UP: ++ case PIN_CONFIG_INPUT_SCHMITT_ENABLE: ++ if (data != func->conf[i].enable || data == func->conf[i].disable) ++ return -EOPNOTSUPP; ++ *config = 0; ++ break; ++ /* 2 parameters */ ++ case PIN_CONFIG_INPUT_SCHMITT: ++ for (j = 0; j < func->nconfs; j++) { ++ switch (func->conf[j].param) { ++ case PIN_CONFIG_INPUT_SCHMITT_ENABLE: ++ if (data != func->conf[j].enable) ++ return -EOPNOTSUPP; ++ break; ++ default: ++ break; ++ } ++ } ++ *config = data; ++ break; ++ case PIN_CONFIG_DRIVE_STRENGTH: ++ case PIN_CONFIG_SLEW_RATE: ++ case PIN_CONFIG_MODE_LOW_POWER: ++ case PIN_CONFIG_INPUT_ENABLE: ++ default: ++ *config = data; ++ break; ++ } ++ return 0; ++ } ++ return -EOPNOTSUPP; ++} ++ ++static int pin_to_gpio_number[] = { ++ 0, 1, 2, 3, 4, 5, 6, 7, ++ 8, 9, 10, 11, 12, 13, 14, 15, ++ 16, 17, 18, 19, 20, 21, 22, 23, ++ 24, 25, 26, 27, 28, 29, 30, 31, ++ 32, 33, 34, 35, 36, 37, 38, 39, ++ 40, 41, 42, 43, 44, 45, 46, 47, ++ 48, 49, 50, 51, 52, 53, 54, 55, ++ 56, 57, 58, 59, 60, 61, 62, 63, ++ 64, 65, 66, 67, 68, 69, 70, 71, ++ 72, 73, 74, 75, 76, 77, 78, 79, ++ 80, 81, 82, 83, 84, 85, 0, 0, ++ 0, 101, 100, 99, 98, 103, 102, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 104, 105, 106, ++ 107, 108, 109, 110, 93, 94, 95, 96, ++ 97, 0, 86, 87, 88, 89, 90, 91, ++ 92, 0, 111, 112, 113, 114, 115, 116, ++ 117, 118, 119, 120, 121, 122, 123, 124, ++ 125, 126, 127 ++}; ++ ++static int pcs_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, ++ unsigned long *configs, unsigned int num_configs) ++{ ++ int i, j, ret, gpio_number; ++ unsigned int arg, data, offset = 0, shift = 0; ++ void __iomem *gpio_base; ++ struct pcs_function *func; ++ struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); ++ ++ ret = pcs_get_function(pctldev, pin, &func); ++ if (ret) ++ return ret; ++ ++ for (j = 0; j < num_configs; j++) { ++ for (i = 0; i < func->nconfs; i++) { ++ if (pinconf_to_config_param(configs[j]) != func->conf[i].param) ++ continue; ++ ++ offset = pin * (pcs->width / BITS_PER_BYTE); ++ data = pcs->read(pcs->base + offset); ++ arg = pinconf_to_config_argument(configs[j]); ++ ++ switch (func->conf[i].param) { ++ /* 2 parameters */ ++ case PIN_CONFIG_OUTPUT_ENABLE: ++ gpio_number = pin_to_gpio_number[pin - 1]; ++ switch (gpio_number) { ++ case 0 ... 
31: ++ gpio_base = pcs->gpio_base + 0xc; ++ offset = gpio_number; ++ break; ++ case 32 ... 63: ++ gpio_base = pcs->gpio_base + 0x4 + 0xc; ++ offset = gpio_number - 32; ++ break; ++ case 64 ... 95: ++ gpio_base = pcs->gpio_base + 0x8 + 0xc; ++ offset = gpio_number - 64; ++ break; ++ case 96 ... 127: ++ gpio_base = pcs->gpio_base + 0x100 + 0xc; ++ offset = gpio_number - 96; ++ break; ++ default: ++ pr_err("Bad pin number\n"); ++ break; ++ } ++ ++ data = pcs->read(gpio_base); ++ data |= (arg << offset); ++ pcs->write(data, gpio_base); ++ ++ break; ++ case PIN_CONFIG_OUTPUT: ++ gpio_number = pin_to_gpio_number[pin - 1]; ++ switch (gpio_number) { ++ case 0 ... 31: ++ gpio_base = pcs->gpio_base + ++ ((arg == 1) ? 0x18 : 0x24); ++ offset = gpio_number; ++ break; ++ case 32 ... 63: ++ gpio_base = pcs->gpio_base + 0x4 + ++ ((arg == 1) ? 0x18 : 0x24); ++ offset = gpio_number - 32; ++ break; ++ case 64 ... 95: ++ gpio_base = pcs->gpio_base + 0x8 + ++ ((arg == 1) ? 0x18 : 0x24); ++ offset = gpio_number - 64; ++ break; ++ case 96 ... 127: ++ gpio_base = pcs->gpio_base + 0x100 + ++ ((arg == 1) ? 0x18 : 0x24); ++ offset = gpio_number - 96; ++ break; ++ default: ++ pr_err("Bad pin number\n"); ++ break; ++ } ++ ++ /* ++ * if we want to set output low, ++ * we should set the arg to 1 ++ * ++ */ ++ if (arg == 0) ++ arg = 1; ++ ++ data = 0; ++ data |= (arg << offset); ++ pcs->write(data, gpio_base); ++ break; ++ case PIN_CONFIG_INPUT_SCHMITT: ++ case PIN_CONFIG_DRIVE_STRENGTH: ++ case PIN_CONFIG_SLEW_RATE: ++ case PIN_CONFIG_MODE_LOW_POWER: ++ case PIN_CONFIG_INPUT_ENABLE: ++ shift = ffs(func->conf[i].mask) - 1; ++ data &= ~func->conf[i].mask; ++ data |= (arg << shift) & func->conf[i].mask; ++ pcs->write(data, pcs->base + offset); ++ break; ++ /* 4 parameters */ ++ case PIN_CONFIG_BIAS_DISABLE: ++ pcs_pinconf_clear_bias(pctldev, pin); ++ pcs->write(data, pcs->base + offset); ++ break; ++ case PIN_CONFIG_BIAS_PULL_DOWN: ++ case PIN_CONFIG_BIAS_PULL_UP: ++ if (arg) ++ pcs_pinconf_clear_bias(pctldev, pin); ++ fallthrough; ++ case PIN_CONFIG_INPUT_SCHMITT_ENABLE: ++ data &= ~func->conf[i].mask; ++ if (arg) ++ data |= func->conf[i].enable; ++ else ++ data |= func->conf[i].disable; ++ pcs->write(data, pcs->base + offset); ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ break; ++ } ++ if (i >= func->nconfs) ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++static int pcs_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned int group, ++ unsigned long *config) ++{ ++ int i, ret; ++ const unsigned int *pins; ++ unsigned int npins, old = 0; ++ ++ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins); ++ if (ret) ++ return ret; ++ for (i = 0; i < npins; i++) { ++ if (pcs_pinconf_get(pctldev, pins[i], config)) ++ return -EOPNOTSUPP; ++ /* configs do not match between two pins */ ++ if (i && old != *config) ++ return -EOPNOTSUPP; ++ old = *config; ++ } ++ return 0; ++} ++ ++static int pcs_pinconf_group_set(struct pinctrl_dev *pctldev, ++ unsigned int group, unsigned long *configs, ++ unsigned int num_configs) ++{ ++ int i, ret; ++ unsigned int npins; ++ const unsigned int *pins; ++ ++ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins); ++ if (ret) ++ return ret; ++ for (i = 0; i < npins; i++) { ++ if (pcs_pinconf_set(pctldev, pins[i], configs, num_configs)) ++ return -EOPNOTSUPP; ++ } ++ return 0; ++} ++ ++static void pcs_pinconf_dbg_show(struct pinctrl_dev *pctldev, ++ struct seq_file *s, unsigned int pin) ++{ ++} ++ ++static void pcs_pinconf_group_dbg_show(struct pinctrl_dev 
*pctldev, ++ struct seq_file *s, ++ unsigned int selector) ++{ ++} ++ ++static void pcs_pinconf_config_dbg_show(struct pinctrl_dev *pctldev, ++ struct seq_file *s, ++ unsigned long config) ++{ ++ pinconf_generic_dump_config(pctldev, s, config); ++} ++ ++static const struct pinconf_ops pcs_pinconf_ops = { ++ .pin_config_get = pcs_pinconf_get, ++ .pin_config_set = pcs_pinconf_set, ++ .pin_config_group_get = pcs_pinconf_group_get, ++ .pin_config_group_set = pcs_pinconf_group_set, ++ .pin_config_dbg_show = pcs_pinconf_dbg_show, ++ .pin_config_group_dbg_show = pcs_pinconf_group_dbg_show, ++ .pin_config_config_dbg_show = pcs_pinconf_config_dbg_show, ++ .is_generic = true, ++}; ++ ++/** ++ * pcs_add_pin() - add a pin to the static per controller pin array ++ * @pcs: pcs driver instance ++ * @offset: register offset from base ++ */ ++static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset) ++{ ++ int i; ++ unsigned int val; ++ struct pinctrl_pin_desc *pin; ++ struct pcs_soc_data *pcs_soc = &pcs->socdata; ++ ++ i = pcs->pins.cur; ++ if (i >= pcs->desc.npins) { ++ dev_err(pcs->dev, "too many pins, max %i\n", pcs->desc.npins); ++ return -ENOMEM; ++ } ++ ++ if (pcs_soc->irq_enable_mask) { ++ val = pcs->read(pcs->base + offset); ++ if (val & pcs_soc->irq_enable_mask) { ++ dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n", ++ (unsigned long)pcs->res->start + offset, val); ++ val &= ~pcs_soc->irq_enable_mask; ++ pcs->write(val, pcs->base + offset); ++ } ++ } ++ ++ pin = &pcs->pins.pa[i]; ++ pin->number = i; ++ pcs->pins.cur++; ++ ++ return i; ++} ++ ++/** ++ * pcs_allocate_pin_table() - adds all the pins for the pinctrl driver ++ * @pcs: pcs driver instance ++ * ++ * In case of errors, resources are freed in pcs_free_resources. ++ * ++ * If your hardware needs holes in the address space, then just set ++ * up multiple driver instances. ++ */ ++static int pcs_allocate_pin_table(struct pcs_device *pcs) ++{ ++ int i, res, mux_bytes, nr_pins; ++ unsigned int offset; ++ ++ mux_bytes = pcs->width / BITS_PER_BYTE; ++ ++ if (pcs->bits_per_mux && pcs->fmask) { ++ pcs->bits_per_pin = fls(pcs->fmask); ++ nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin; ++ } else { ++ nr_pins = pcs->size / mux_bytes; ++ } ++ ++ dev_dbg(pcs->dev, "allocating %i pins\n", nr_pins); ++ pcs->pins.pa = devm_kcalloc(pcs->dev, nr_pins, sizeof(*pcs->pins.pa), ++ GFP_KERNEL); ++ if (!pcs->pins.pa) ++ return -ENOMEM; ++ ++ pcs->desc.pins = pcs->pins.pa; ++ pcs->desc.npins = nr_pins; ++ ++ for (i = 0; i < pcs->desc.npins; i++) { ++ offset = pcs_pin_reg_offset_get(pcs, i); ++ res = pcs_add_pin(pcs, offset); ++ if (res < 0) { ++ dev_err(pcs->dev, "error adding pins: %i\n", res); ++ return res; ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * pcs_add_function() - adds a new function to the function list ++ * @pcs: pcs driver instance ++ * @fcn: new function allocated ++ * @name: name of the function ++ * @vals: array of mux register value pairs used by the function ++ * @nvals: number of mux register value pairs ++ * @pgnames: array of pingroup names for the function ++ * @npgnames: number of pingroup names ++ * ++ * Caller must take care of locking. 
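++ * (Per the struct pcs_device documentation above, pcs->mutex is the lock
++ * protecting these lists.)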
++ */ ++static int pcs_add_function(struct pcs_device *pcs, struct pcs_function **fcn, ++ const char *name, struct pcs_func_vals *vals, ++ unsigned int nvals, const char **pgnames, ++ unsigned int npgnames) ++{ ++ int selector; ++ struct pcs_function *function; ++ ++ function = devm_kzalloc(pcs->dev, sizeof(*function), GFP_KERNEL); ++ if (!function) ++ return -ENOMEM; ++ ++ function->vals = vals; ++ function->nvals = nvals; ++ function->name = name; ++ ++ selector = pinmux_generic_add_function(pcs->pctl, name, pgnames, npgnames, function); ++ if (selector < 0) { ++ devm_kfree(pcs->dev, function); ++ *fcn = NULL; ++ } else { ++ *fcn = function; ++ } ++ ++ return selector; ++} ++ ++/** ++ * pcs_get_pin_by_offset() - get a pin index based on the register offset ++ * @pcs: pcs driver instance ++ * @offset: register offset from the base ++ * ++ * Note that this is OK as long as the pins are in a static array. ++ */ ++static int pcs_get_pin_by_offset(struct pcs_device *pcs, unsigned int offset) ++{ ++ unsigned int index; ++ ++ if (offset >= pcs->size) { ++ dev_err(pcs->dev, "mux offset out of range: 0x%x (0x%x)\n", offset, pcs->size); ++ return -EINVAL; ++ } ++ ++ if (pcs->bits_per_mux) ++ index = (offset * BITS_PER_BYTE) / pcs->bits_per_pin; ++ else ++ index = offset / (pcs->width / BITS_PER_BYTE); ++ ++ return index; ++} ++ ++/* ++ * check whether data matches enable bits or disable bits ++ * Return value: 1 for matching enable bits, 0 for matching disable bits, ++ * and negative value for matching failure. ++ */ ++static int pcs_config_match(unsigned int data, unsigned int enable, unsigned int disable) ++{ ++ int ret = -EINVAL; ++ ++ if (data == enable) ++ ret = 1; ++ else if (data == disable) ++ ret = 0; ++ return ret; ++} ++ ++static void add_config(struct pcs_conf_vals **conf, ++ enum pin_config_param param, ++ unsigned int value, unsigned int enable, ++ unsigned int disable, unsigned int mask) ++{ ++ (*conf)->param = param; ++ (*conf)->val = value; ++ (*conf)->enable = enable; ++ (*conf)->disable = disable; ++ (*conf)->mask = mask; ++ (*conf)++; ++} ++ ++static void add_setting(unsigned long **setting, enum pin_config_param param, unsigned int arg) ++{ ++ **setting = pinconf_to_config_packed(param, arg); ++ (*setting)++; ++} ++ ++/* add pinconf setting with 2 parameters */ ++static void pcs_add_conf2(struct pcs_device *pcs, struct device_node *np, ++ const char *name, enum pin_config_param param, ++ struct pcs_conf_vals **conf, unsigned long **settings) ++{ ++ int ret; ++ unsigned int value[2], shift; ++ ++ ret = of_property_read_u32_array(np, name, value, 2); ++ if (ret) ++ return; ++ /* set value & mask */ ++ value[0] &= value[1]; ++ shift = ffs(value[1]) - 1; ++ /* skip enable & disable */ ++ add_config(conf, param, value[0], 0, 0, value[1]); ++ add_setting(settings, param, value[0] >> shift); ++} ++ ++/* add pinconf setting with 4 parameters */ ++static void pcs_add_conf4(struct pcs_device *pcs, struct device_node *np, ++ const char *name, enum pin_config_param param, ++ struct pcs_conf_vals **conf, unsigned long **settings) ++{ ++ int ret; ++ unsigned int value[4]; ++ ++ /* value to set, enable, disable, mask */ ++ ret = of_property_read_u32_array(np, name, value, 4); ++ if (ret) ++ return; ++ if (!value[3]) { ++ dev_err(pcs->dev, "mask field of the property can't be 0\n"); ++ return; ++ } ++ value[0] &= value[3]; ++ value[1] &= value[3]; ++ value[2] &= value[3]; ++ ret = pcs_config_match(value[0], value[1], value[2]); ++ if (ret < 0) ++ dev_dbg(pcs->dev, "failed to match enable or 
disable bits\n"); ++ add_config(conf, param, value[0], value[1], value[2], value[3]); ++ add_setting(settings, param, ret); ++} ++ ++static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, ++ struct pcs_function *func, ++ struct pinctrl_map **map) ++ ++{ ++ int i = 0, nconfs = 0; ++ struct pinctrl_map *m = *map; ++ unsigned long *settings = NULL, *s = NULL; ++ struct pcs_conf_vals *conf = NULL; ++ static const struct pcs_conf_type prop2[] = { ++ { "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, }, ++ { "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, }, ++ { "pinctrl-single,input-enable", PIN_CONFIG_INPUT_ENABLE, }, ++ { "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, }, ++ { "pinctrl-single,output-enable", PIN_CONFIG_OUTPUT_ENABLE, }, ++ { "pinctrl-single,output", PIN_CONFIG_OUTPUT, }, ++ { "pinctrl-single,low-power-mode", PIN_CONFIG_MODE_LOW_POWER, }, ++ }; ++ static const struct pcs_conf_type prop4[] = { ++ { "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, }, ++ { "pinctrl-single,bias-pulldown", PIN_CONFIG_BIAS_PULL_DOWN, }, ++ { "pinctrl-single,input-schmitt-enable", ++ PIN_CONFIG_INPUT_SCHMITT_ENABLE, }, ++ }; ++ ++ /* If pinconf isn't supported, don't parse properties in below. */ ++ if (!PCS_HAS_PINCONF(pcs)) ++ return -EOPNOTSUPP; ++ ++ /* cacluate how much properties are supported in current node */ ++ for (i = 0; i < ARRAY_SIZE(prop2); i++) { ++ if (of_property_present(np, prop2[i].name)) ++ nconfs++; ++ } ++ for (i = 0; i < ARRAY_SIZE(prop4); i++) { ++ if (of_property_present(np, prop4[i].name)) ++ nconfs++; ++ } ++ if (!nconfs) ++ return -EOPNOTSUPP; ++ ++ func->conf = devm_kcalloc(pcs->dev, nconfs, sizeof(struct pcs_conf_vals), GFP_KERNEL); ++ if (!func->conf) ++ return -ENOMEM; ++ func->nconfs = nconfs; ++ conf = &func->conf[0]; ++ m++; ++ settings = devm_kcalloc(pcs->dev, nconfs, sizeof(unsigned long), GFP_KERNEL); ++ if (!settings) ++ return -ENOMEM; ++ s = &settings[0]; ++ ++ for (i = 0; i < ARRAY_SIZE(prop2); i++) ++ pcs_add_conf2(pcs, np, prop2[i].name, prop2[i].param, ++ &conf, &s); ++ for (i = 0; i < ARRAY_SIZE(prop4); i++) ++ pcs_add_conf4(pcs, np, prop4[i].name, prop4[i].param, ++ &conf, &s); ++ m->type = PIN_MAP_TYPE_CONFIGS_GROUP; ++ m->data.configs.group_or_pin = np->name; ++ m->data.configs.configs = settings; ++ m->data.configs.num_configs = nconfs; ++ return 0; ++} ++ ++/** ++ * pcs_parse_one_pinctrl_entry() - parses a device tree mux entry ++ * @pcs: pinctrl driver instance ++ * @np: device node of the mux entry ++ * @map: map entry ++ * @num_maps: number of map ++ * @pgnames: pingroup names ++ * ++ * Note that this binding currently supports only sets of one register + value. ++ * ++ * Also note that this driver tries to avoid understanding pin and function ++ * names because of the extra bloat they would cause especially in the case of ++ * a large number of pins. This driver just sets what is specified for the board ++ * in the .dts file. Further user space debugging tools can be developed to ++ * decipher the pin and function names using debugfs. ++ * ++ * If you are concerned about the boot time, set up the static pins in ++ * the bootloader, and only set up selected pins as device tree entries. 
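++ *
++ * A purely illustrative consumer entry for the pinctrl-single,pins binding
++ * (the node name, register offsets and mux values below are hypothetical and
++ * board specific; each row is <register offset, mux value>, and an optional
++ * third cell is OR-ed into the value):
++ *
++ *	uart0_pins: uart0-pins {
++ *		pinctrl-single,pins = <
++ *			0x208 0x0
++ *			0x20c 0x0
++ *		>;
++ *	};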
++ */ ++static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, ++ struct device_node *np, ++ struct pinctrl_map **map, ++ unsigned int *num_maps, ++ const char **pgnames) ++{ ++ unsigned int offset; ++ struct pcs_func_vals *vals; ++ int i, fsel, gsel, pin, rows, *pins, found = 0, res = -ENOMEM; ++ struct pcs_function *function = NULL; ++ struct of_phandle_args pinctrl_spec; ++ const char *name = "pinctrl-single,pins"; ++ ++ rows = pinctrl_count_index_with_args(np, name); ++ if (rows <= 0) { ++ dev_err(pcs->dev, "Invalid number of rows: %d\n", rows); ++ return -EINVAL; ++ } ++ ++ vals = devm_kcalloc(pcs->dev, rows, sizeof(*vals), GFP_KERNEL); ++ if (!vals) ++ return -ENOMEM; ++ ++ pins = devm_kcalloc(pcs->dev, rows, sizeof(*pins), GFP_KERNEL); ++ if (!pins) ++ goto free_vals; ++ ++ for (i = 0; i < rows; i++) { ++ res = pinctrl_parse_index_with_args(np, name, i, &pinctrl_spec); ++ if (res) ++ return res; ++ ++ if (pinctrl_spec.args_count < 2 || pinctrl_spec.args_count > 3) { ++ dev_err(pcs->dev, "invalid args_count for spec: %i\n", ++ pinctrl_spec.args_count); ++ break; ++ } ++ ++ offset = pinctrl_spec.args[0]; ++ vals[found].reg = pcs->base + offset; ++ ++ switch (pinctrl_spec.args_count) { ++ case 2: ++ vals[found].val = pinctrl_spec.args[1]; ++ break; ++ case 3: ++ vals[found].val = (pinctrl_spec.args[1] | pinctrl_spec.args[2]); ++ break; ++ default: ++ break; ++ } ++ ++ dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n", ++ pinctrl_spec.np, offset, vals[found].val); ++ ++ pin = pcs_get_pin_by_offset(pcs, offset); ++ if (pin < 0) { ++ dev_err(pcs->dev, ++ "could not add functions for %pOFn %ux\n", ++ np, offset); ++ break; ++ } ++ pins[found++] = pin; ++ } ++ ++ pgnames[0] = np->name; ++ mutex_lock(&pcs->mutex); ++ fsel = pcs_add_function(pcs, &function, np->name, vals, found, pgnames, 1); ++ if (fsel < 0) { ++ res = fsel; ++ goto free_pins; ++ } ++ ++ gsel = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs); ++ if (gsel < 0) { ++ res = gsel; ++ goto free_function; ++ } ++ ++ (*map)->type = PIN_MAP_TYPE_MUX_GROUP; ++ (*map)->data.mux.group = np->name; ++ (*map)->data.mux.function = np->name; ++ ++ if (PCS_HAS_PINCONF(pcs) && function) { ++ res = pcs_parse_pinconf(pcs, np, function, map); ++ if (res == 0) ++ *num_maps = 2; ++ else if (res == -EOPNOTSUPP) ++ *num_maps = 1; ++ else ++ goto free_pingroups; ++ } else { ++ *num_maps = 1; ++ } ++ mutex_unlock(&pcs->mutex); ++ ++ return 0; ++ ++free_pingroups: ++ pinctrl_generic_remove_group(pcs->pctl, gsel); ++ *num_maps = 1; ++free_function: ++ pinmux_generic_remove_function(pcs->pctl, fsel); ++free_pins: ++ mutex_unlock(&pcs->mutex); ++ devm_kfree(pcs->dev, pins); ++ ++free_vals: ++ devm_kfree(pcs->dev, vals); ++ ++ return res; ++} ++ ++static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs, ++ struct device_node *np, ++ struct pinctrl_map **map, ++ unsigned int *num_maps, ++ const char **pgnames) ++{ ++ const char *name = "pinctrl-single,bits"; ++ struct pcs_func_vals *vals; ++ int i, fsel, pin, rows, *pins, npins_in_row, found = 0, res = -ENOMEM; ++ struct pcs_function *function = NULL; ++ struct of_phandle_args pinctrl_spec; ++ unsigned int offset, val, pin_num_from_lsb; ++ unsigned int mask, bit_pos, val_pos, mask_pos, submask; ++ ++ rows = pinctrl_count_index_with_args(np, name); ++ if (rows <= 0) { ++ dev_err(pcs->dev, "Invalid number of rows: %d\n", rows); ++ return -EINVAL; ++ } ++ ++ if (PCS_HAS_PINCONF(pcs)) { ++ dev_err(pcs->dev, "pinconf not supported\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ 
npins_in_row = pcs->width / pcs->bits_per_pin; ++ ++ vals = devm_kzalloc(pcs->dev, ++ array3_size(rows, npins_in_row, sizeof(*vals)), ++ GFP_KERNEL); ++ if (!vals) ++ return -ENOMEM; ++ ++ pins = devm_kzalloc(pcs->dev, ++ array3_size(rows, npins_in_row, sizeof(*pins)), ++ GFP_KERNEL); ++ if (!pins) ++ goto free_vals; ++ ++ for (i = 0; i < rows; i++) { ++ res = pinctrl_parse_index_with_args(np, name, i, &pinctrl_spec); ++ if (res) ++ return res; ++ ++ if (pinctrl_spec.args_count < 3) { ++ dev_err(pcs->dev, "invalid args_count for spec: %i\n", ++ pinctrl_spec.args_count); ++ break; ++ } ++ ++ /* Index plus two value cells */ ++ offset = pinctrl_spec.args[0]; ++ val = pinctrl_spec.args[1]; ++ mask = pinctrl_spec.args[2]; ++ ++ dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x mask: 0x%x\n", ++ pinctrl_spec.np, offset, val, mask); ++ ++ /* Parse pins in each row from LSB */ ++ while (mask) { ++ bit_pos = __ffs(mask); ++ pin_num_from_lsb = bit_pos / pcs->bits_per_pin; ++ mask_pos = ((pcs->fmask) << bit_pos); ++ val_pos = val & mask_pos; ++ submask = mask & mask_pos; ++ ++ if ((mask & mask_pos) == 0) { ++ dev_err(pcs->dev, ++ "Invalid mask for %pOFn at 0x%x\n", ++ np, offset); ++ break; ++ } ++ ++ mask &= ~mask_pos; ++ ++ if (submask != mask_pos) { ++ dev_warn(pcs->dev, ++ "Invalid submask 0x%x for %pOFn at 0x%x\n", ++ submask, np, offset); ++ continue; ++ } ++ ++ vals[found].mask = submask; ++ vals[found].reg = pcs->base + offset; ++ vals[found].val = val_pos; ++ ++ pin = pcs_get_pin_by_offset(pcs, offset); ++ if (pin < 0) { ++ dev_err(pcs->dev, ++ "could not add functions for %pOFn %ux\n", ++ np, offset); ++ break; ++ } ++ pins[found++] = pin + pin_num_from_lsb; ++ } ++ } ++ ++ pgnames[0] = np->name; ++ mutex_lock(&pcs->mutex); ++ fsel = pcs_add_function(pcs, &function, np->name, vals, found, pgnames, 1); ++ if (fsel < 0) { ++ res = fsel; ++ goto free_pins; ++ } ++ ++ res = pinctrl_generic_add_group(pcs->pctl, np->name, pins, found, pcs); ++ if (res < 0) ++ goto free_function; ++ ++ (*map)->type = PIN_MAP_TYPE_MUX_GROUP; ++ (*map)->data.mux.group = np->name; ++ (*map)->data.mux.function = np->name; ++ ++ *num_maps = 1; ++ mutex_unlock(&pcs->mutex); ++ ++ return 0; ++ ++free_function: ++ pinmux_generic_remove_function(pcs->pctl, fsel); ++free_pins: ++ mutex_unlock(&pcs->mutex); ++ devm_kfree(pcs->dev, pins); ++ ++free_vals: ++ devm_kfree(pcs->dev, vals); ++ ++ return res; ++} ++ ++/** ++ * pcs_dt_node_to_map() - allocates and parses pinctrl maps ++ * @pctldev: pinctrl instance ++ * @np_config: device tree pinmux entry ++ * @map: array of map entries ++ * @num_maps: number of maps ++ */ ++static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev, ++ struct device_node *np_config, ++ struct pinctrl_map **map, unsigned int *num_maps) ++{ ++ int ret; ++ const char **pgnames; ++ struct pcs_device *pcs; ++ ++ pcs = pinctrl_dev_get_drvdata(pctldev); ++ ++ /* create 2 maps. One is for pinmux, and the other is for pinconf. 
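++ * The first entry always receives the PIN_MAP_TYPE_MUX_GROUP data; the
++ * second entry is only consumed when the node also carries generic pinconf
++ * properties, in which case *num_maps is set to 2 instead of 1.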
*/ ++ *map = devm_kcalloc(pcs->dev, 2, sizeof(**map), GFP_KERNEL); ++ if (!*map) ++ return -ENOMEM; ++ ++ *num_maps = 0; ++ ++ pgnames = devm_kzalloc(pcs->dev, sizeof(*pgnames), GFP_KERNEL); ++ if (!pgnames) { ++ ret = -ENOMEM; ++ goto free_map; ++ } ++ ++ if (pcs->bits_per_mux) { ++ ret = pcs_parse_bits_in_pinctrl_entry(pcs, np_config, map, ++ num_maps, pgnames); ++ if (ret < 0) { ++ dev_err(pcs->dev, "no pins entries for %pOFn\n", ++ np_config); ++ goto free_pgnames; ++ } ++ } else { ++ ret = pcs_parse_one_pinctrl_entry(pcs, np_config, map, ++ num_maps, pgnames); ++ if (ret < 0) { ++ dev_err(pcs->dev, "no pins entries for %pOFn\n", ++ np_config); ++ goto free_pgnames; ++ } ++ } ++ ++ return 0; ++ ++free_pgnames: ++ devm_kfree(pcs->dev, pgnames); ++free_map: ++ devm_kfree(pcs->dev, *map); ++ ++ return ret; ++} ++ ++/** ++ * pcs_irq_free() - free interrupt ++ * @pcs: pcs driver instance ++ */ ++static void pcs_irq_free(struct pcs_device *pcs) ++{ ++ struct pcs_soc_data *pcs_soc = &pcs->socdata; ++ ++ if (pcs_soc->irq < 0) ++ return; ++ ++ if (pcs->domain) ++ irq_domain_remove(pcs->domain); ++ ++ if (PCS_QUIRK_HAS_SHARED_IRQ(pcs)) ++ free_irq(pcs_soc->irq, pcs_soc); ++ else ++ irq_set_chained_handler(pcs_soc->irq, NULL); ++} ++ ++/** ++ * pcs_free_resources() - free memory used by this driver ++ * @pcs: pcs driver instance ++ */ ++static void pcs_free_resources(struct pcs_device *pcs) ++{ ++ pcs_irq_free(pcs); ++ ++#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE) ++ if (pcs->missing_nr_pinctrl_cells) ++ of_remove_property(pcs->np, pcs->missing_nr_pinctrl_cells); ++#endif ++} ++ ++static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs) ++{ ++ int ret, i; ++ struct of_phandle_args gpiospec; ++ struct pcs_gpiofunc_range *range; ++ const char *propname = "pinctrl-single,gpio-range"; ++ const char *cellname = "#pinctrl-single,gpio-range-cells"; ++ ++ for (i = 0; ; i++) { ++ ret = of_parse_phandle_with_args(node, propname, cellname, ++ i, &gpiospec); ++ /* Do not treat it as error. Only treat it as end condition. */ ++ if (ret) { ++ ret = 0; ++ break; ++ } ++ range = devm_kzalloc(pcs->dev, sizeof(*range), GFP_KERNEL); ++ if (!range) { ++ ret = -ENOMEM; ++ break; ++ } ++ range->offset = gpiospec.args[0]; ++ range->npins = gpiospec.args[1]; ++ range->gpiofunc = gpiospec.args[2]; ++ mutex_lock(&pcs->mutex); ++ list_add_tail(&range->node, &pcs->gpiofuncs); ++ mutex_unlock(&pcs->mutex); ++ } ++ return ret; ++} ++ ++/** ++ * struct pcs_interrupt ++ * @reg: virtual address of interrupt register ++ * @hwirq: hardware irq number ++ * @irq: virtual irq number ++ * @node: list node ++ */ ++struct pcs_interrupt { ++ void __iomem *reg; ++ irq_hw_number_t hwirq; ++ unsigned int irq; ++ struct list_head node; ++}; ++ ++/** ++ * pcs_irq_set() - enables or disables an interrupt ++ * @pcs_soc: SoC specific settings ++ * @irq: interrupt ++ * @enable: enable or disable the interrupt ++ * ++ * Note that this currently assumes one interrupt per pinctrl ++ * register that is typically used for wake-up events. 
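++ *
++ * The enable bit is updated under pcs->lock and read back afterwards to
++ * flush the posted write; once all matching registers have been updated, the
++ * optional rearm hook from the platform data is invoked, if one was set.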
++ */ ++static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc, int irq, const bool enable) ++{ ++ struct pcs_device *pcs; ++ struct list_head *pos; ++ unsigned int mask, soc_mask; ++ struct pcs_interrupt *pcswi; ++ ++ pcs = container_of(pcs_soc, struct pcs_device, socdata); ++ list_for_each(pos, &pcs->irqs) { ++ pcswi = list_entry(pos, struct pcs_interrupt, node); ++ if (irq != pcswi->irq) ++ continue; ++ ++ soc_mask = pcs_soc->irq_enable_mask; ++ raw_spin_lock(&pcs->lock); ++ mask = pcs->read(pcswi->reg); ++ if (enable) ++ mask |= soc_mask; ++ else ++ mask &= ~soc_mask; ++ pcs->write(mask, pcswi->reg); ++ ++ /* flush posted write */ ++ mask = pcs->read(pcswi->reg); ++ raw_spin_unlock(&pcs->lock); ++ } ++ ++ if (pcs_soc->rearm) ++ pcs_soc->rearm(); ++} ++ ++/** ++ * pcs_irq_mask() - mask pinctrl interrupt ++ * @d: interrupt data ++ */ ++static void pcs_irq_mask(struct irq_data *d) ++{ ++ struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d); ++ ++ pcs_irq_set(pcs_soc, d->irq, true); ++} ++ ++/** ++ * pcs_irq_unmask() - unmask pinctrl interrupt ++ * @d: interrupt data ++ */ ++static void pcs_irq_unmask(struct irq_data *d) ++{ ++ struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d); ++ ++ pcs_irq_set(pcs_soc, d->irq, false); ++} ++ ++/** ++ * pcs_irq_set_wake() - toggle the suspend and resume wake up ++ * @d: interrupt data ++ * @state: wake-up state ++ * ++ * Note that this should be called only for suspend and resume. ++ * For runtime PM, the wake-up events should be enabled by default. ++ */ ++static int pcs_irq_set_wake(struct irq_data *d, unsigned int state) ++{ ++ if (state) ++ pcs_irq_unmask(d); ++ else ++ pcs_irq_mask(d); ++ ++ return 0; ++} ++ ++static inline void _pcs_irq_set_type(struct pcs_soc_data *pcs_soc, int irq, int flow_type) ++{ ++ struct pcs_device *pcs; ++ struct list_head *pos; ++ unsigned int mask, soc_mask; ++ struct pcs_interrupt *pcswi; ++ ++ pcs = container_of(pcs_soc, struct pcs_device, socdata); ++ list_for_each(pos, &pcs->irqs) { ++ pcswi = list_entry(pos, struct pcs_interrupt, node); ++ if (irq != pcswi->irq) ++ continue; ++ ++ soc_mask = pcs_soc->irq_enable_mask; ++ raw_spin_lock(&pcs->lock); ++ mask = pcs->read(pcswi->reg); ++ ++ if (flow_type == IRQ_TYPE_EDGE_RISING) ++ mask |= (1 << EDGE_RISE_EN); ++ else ++ mask &= ~(1 << EDGE_RISE_EN); ++ ++ if (flow_type == IRQ_TYPE_EDGE_FALLING) ++ mask |= (1 << EDGE_FALL_EN); ++ else ++ mask &= ~(1 << EDGE_FALL_EN); ++ ++ pcs->write(mask, pcswi->reg); ++ ++ /* flush posted write */ ++ mask = pcs->read(pcswi->reg); ++ raw_spin_unlock(&pcs->lock); ++ } ++} ++ ++static int pcs_irq_set_type(struct irq_data *d, unsigned int flow_type) ++{ ++ struct pcs_soc_data *pcs_soc = irq_data_get_irq_chip_data(d); ++ ++ _pcs_irq_set_type(pcs_soc, d->irq, flow_type); ++ ++ return 0; ++} ++ ++/** ++ * pcs_irq_handle() - common interrupt handler ++ * @pcs_soc: SoC specific settings ++ * ++ * Note that this currently assumes we have one interrupt bit per ++ * mux register. This interrupt is typically used for wake-up events. ++ * For more complex interrupts different handlers can be specified. 
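++ *
++ * Each hwirq is a mux register offset (see the irq domain setup below), so
++ * it is first translated into a word offset and bit position within the
++ * gedge flag registers before its status bit is tested. Returns the number
++ * of interrupts that were dispatched to the irq domain.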
++ */ ++static int pcs_irq_handle(struct pcs_soc_data *pcs_soc) ++{ ++ int count = 0; ++ struct list_head *pos; ++ struct pcs_device *pcs; ++ struct pcs_interrupt *pcswi; ++ unsigned int mask, reg_offset, bit_offset; ++ ++ pcs = container_of(pcs_soc, struct pcs_device, socdata); ++ list_for_each(pos, &pcs->irqs) { ++ pcswi = list_entry(pos, struct pcs_interrupt, node); ++ reg_offset = (pcswi->hwirq / 4 - 1) / 32 * 4; ++ bit_offset = (pcswi->hwirq / 4 - 1) - reg_offset / 4 * 32; ++ ++ raw_spin_lock(&pcs->lock); ++ mask = pcs->read(pcs->gedge_flag_base + reg_offset); ++ raw_spin_unlock(&pcs->lock); ++ ++ if (mask & (1 << bit_offset)) { ++ generic_handle_domain_irq(pcs->domain, pcswi->hwirq); ++ count++; ++ } ++ } ++ ++ return count; ++} ++ ++/** ++ * pcs_irq_handler() - handler for the shared interrupt case ++ * @irq: interrupt ++ * @d: data ++ * ++ * Use this for cases where multiple instances of ++ * pinctrl-single share a single interrupt like on omaps. ++ */ ++static irqreturn_t pcs_irq_handler(int irq, void *d) ++{ ++ struct pcs_soc_data *pcs_soc = d; ++ ++ return pcs_irq_handle(pcs_soc) ? IRQ_HANDLED : IRQ_NONE; ++} ++ ++/** ++ * pcs_irq_chain_handler() - handler for the dedicated chained interrupt case ++ * @desc: interrupt descriptor ++ * ++ * Use this if you have a separate interrupt for each ++ * pinctrl-single instance. ++ */ ++static void pcs_irq_chain_handler(struct irq_desc *desc) ++{ ++ struct irq_chip *chip; ++ struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); ++ ++ chip = irq_desc_get_chip(desc); ++ chained_irq_enter(chip, desc); ++ pcs_irq_handle(pcs_soc); ++ chained_irq_exit(chip, desc); ++} ++ ++static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) ++{ ++ struct pcs_device *pcs; ++ struct pcs_interrupt *pcswi; ++ struct pcs_soc_data *pcs_soc = d->host_data; ++ ++ pcs = container_of(pcs_soc, struct pcs_device, socdata); ++ pcswi = devm_kzalloc(pcs->dev, sizeof(*pcswi), GFP_KERNEL); ++ if (!pcswi) ++ return -ENOMEM; ++ ++ pcswi->reg = pcs->base + hwirq; ++ pcswi->hwirq = hwirq; ++ pcswi->irq = irq; ++ ++ mutex_lock(&pcs->mutex); ++ list_add_tail(&pcswi->node, &pcs->irqs); ++ mutex_unlock(&pcs->mutex); ++ ++ irq_set_chip_data(irq, pcs_soc); ++ irq_set_chip_and_handler(irq, &pcs->chip, ++ handle_level_irq); ++ irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class); ++ irq_set_noprobe(irq); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops pcs_irqdomain_ops = { ++ .map = pcs_irqdomain_map, ++ .xlate = irq_domain_xlate_onecell, ++}; ++ ++/** ++ * pcs_irq_init_chained_handler() - set up a chained interrupt handler ++ * @pcs: pcs driver instance ++ * @np: device node pointer ++ */ ++static int pcs_irq_init_chained_handler(struct pcs_device *pcs, struct device_node *np) ++{ ++ int num_irqs; ++ const char *name = "pinctrl"; ++ struct pcs_soc_data *pcs_soc = &pcs->socdata; ++ ++ if (!pcs_soc->irq_enable_mask || !pcs_soc->irq_status_mask) { ++ pcs_soc->irq = -1; ++ return -EINVAL; ++ } ++ ++ INIT_LIST_HEAD(&pcs->irqs); ++ pcs->chip.name = name; ++ pcs->chip.irq_ack = pcs_irq_mask; ++ pcs->chip.irq_mask = pcs_irq_mask; ++ pcs->chip.irq_unmask = pcs_irq_unmask; ++ pcs->chip.irq_set_wake = pcs_irq_set_wake; ++ pcs->chip.flags = IRQCHIP_SKIP_SET_WAKE; ++ pcs->chip.irq_set_type = pcs_irq_set_type; ++ ++ if (PCS_QUIRK_HAS_SHARED_IRQ(pcs)) { ++ int res; ++ ++ res = request_irq(pcs_soc->irq, pcs_irq_handler, ++ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_NO_THREAD, ++ name, pcs_soc); ++ if (res) { ++ pcs_soc->irq = -1; ++ return 
res; ++ } ++ } else { ++ irq_set_chained_handler_and_data(pcs_soc->irq, ++ pcs_irq_chain_handler, ++ pcs_soc); ++ } ++ ++ /* ++ * We can use the register offset as the hardirq ++ * number as irq_domain_add_simple maps them lazily. ++ * This way we can easily support more than one ++ * interrupt per function if needed. ++ */ ++ num_irqs = pcs->size; ++ ++ pcs->domain = irq_domain_add_simple(np, num_irqs, 0, ++ &pcs_irqdomain_ops, ++ pcs_soc); ++ if (!pcs->domain) { ++ irq_set_chained_handler(pcs_soc->irq, NULL); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static struct pcs_device *pinctrl_pcs; ++ ++static int pinctrl_syscore_suspend(void) ++{ ++ pinctrl_force_sleep(pinctrl_pcs->pctl); ++ ++ return 0; ++} ++ ++static void pinctrl_syscore_resume(void) ++{ ++ pinctrl_force_default(pinctrl_pcs->pctl); ++} ++ ++static struct syscore_ops pinctrl_syscore_ops = { ++ .suspend = pinctrl_syscore_suspend, ++ .resume = pinctrl_syscore_resume, ++}; ++#endif ++ ++/** ++ * pcs_quirk_missing_pinctrl_cells - handle legacy binding ++ * @pcs: pinctrl driver instance ++ * @np: device tree node ++ * @cells: number of cells ++ * ++ * Handle legacy binding with no #pinctrl-cells. This should be ++ * always two pinctrl-single,bit-per-mux and one for others. ++ * At some point we may want to consider removing this. ++ */ ++static int pcs_quirk_missing_pinctrl_cells(struct pcs_device *pcs, ++ struct device_node *np, ++ int cells) ++{ ++ u32 val; ++ int error; ++ struct property *p; ++ const char *name = "#pinctrl-cells"; ++ ++ error = of_property_read_u32(np, name, &val); ++ if (!error) ++ return 0; ++ ++ dev_warn(pcs->dev, "please update dts to use %s = <%i>\n", name, cells); ++ ++ p = devm_kzalloc(pcs->dev, sizeof(*p), GFP_KERNEL); ++ if (!p) ++ return -ENOMEM; ++ ++ p->length = sizeof(__be32); ++ p->value = devm_kzalloc(pcs->dev, sizeof(__be32), GFP_KERNEL); ++ if (!p->value) ++ return -ENOMEM; ++ *(__be32 *)p->value = cpu_to_be32(cells); ++ ++ p->name = devm_kstrdup(pcs->dev, name, GFP_KERNEL); ++ if (!p->name) ++ return -ENOMEM; ++ ++ pcs->missing_nr_pinctrl_cells = p; ++ ++#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE) ++ error = of_add_property(np, pcs->missing_nr_pinctrl_cells); ++#endif ++ ++ return error; ++} ++ ++static struct clk *psc_clk; ++static struct reset_control *psc_rst; ++ ++static int pcs_probe(struct platform_device *pdev) ++{ ++ int ret; ++ u32 regval, i; ++ void __iomem *base; ++ struct pcs_pdata *pdata; ++ struct resource *res; ++ struct pcs_device *pcs; ++ const struct pcs_soc_data *soc; ++ struct device_node *np = pdev->dev.of_node; ++ ++ soc = of_device_get_match_data(&pdev->dev); ++ if (WARN_ON(!soc)) ++ return -EINVAL; ++ ++ psc_rst = devm_reset_control_get_exclusive(&pdev->dev, "aib_rst"); ++ if (IS_ERR(psc_rst)) { ++ ret = PTR_ERR(psc_rst); ++ dev_err(&pdev->dev, "Failed to get reset: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ /* deasser clk */ ++ ret = reset_control_deassert(psc_rst); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to deassert reset: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ psc_clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(psc_clk)) { ++ dev_err(&pdev->dev, "Fail to get pinctrl clock, error %ld.\n", ++ PTR_ERR(psc_clk)); ++ return PTR_ERR(psc_clk); ++ } ++ ++ ret = clk_prepare_enable(psc_clk); ++ if (ret) { ++ dev_err(&pdev->dev, "Fail to enable pinctrl clock, error %d.\n", ret); ++ return ret; ++ } ++ ++ pcs = devm_kzalloc(&pdev->dev, sizeof(*pcs), GFP_KERNEL); ++ if (!pcs) ++ return -ENOMEM; ++ ++ pcs->dev = &pdev->dev; ++ pcs->np = np; ++ 
raw_spin_lock_init(&pcs->lock); ++ mutex_init(&pcs->mutex); ++ INIT_LIST_HEAD(&pcs->gpiofuncs); ++ pcs->flags = soc->flags; ++ memcpy(&pcs->socdata, soc, sizeof(*soc)); ++ ++ ret = of_property_read_u32(np, "pinctrl-single,register-width", &pcs->width); ++ if (ret) { ++ dev_err(pcs->dev, "register width not specified\n"); ++ ++ return ret; ++ } ++ ++ ret = of_property_read_u32(np, "pinctrl-single,function-mask", &pcs->fmask); ++ if (!ret) { ++ pcs->fshift = __ffs(pcs->fmask); ++ pcs->fmax = pcs->fmask >> pcs->fshift; ++ } else { ++ /* If mask property doesn't exist, function mux is invalid. */ ++ pcs->fmask = 0; ++ pcs->fshift = 0; ++ pcs->fmax = 0; ++ } ++ ++ ret = of_property_read_u32(np, "pinctrl-single,function-off", &pcs->foff); ++ if (ret) ++ pcs->foff = PCS_OFF_DISABLED; ++ ++ pcs->bits_per_mux = of_property_read_bool(np, "pinctrl-single,bit-per-mux"); ++ ret = pcs_quirk_missing_pinctrl_cells(pcs, np, pcs->bits_per_mux ? 2 : 1); ++ if (ret) { ++ dev_err(&pdev->dev, "unable to patch #pinctrl-cells\n"); ++ ++ return ret; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(pcs->dev, "could not get resource\n"); ++ return -ENODEV; ++ } ++ ++ pcs->res = devm_request_mem_region(pcs->dev, res->start, ++ resource_size(res), DRIVER_NAME); ++ if (!pcs->res) { ++ dev_err(pcs->dev, "could not get mem_region\n"); ++ return -EBUSY; ++ } ++ ++ pcs->size = resource_size(pcs->res); ++ pcs->base = devm_ioremap(pcs->dev, pcs->res->start, pcs->size); ++ if (!pcs->base) { ++ dev_err(pcs->dev, "could not ioremap\n"); ++ return -ENODEV; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ if (!res) { ++ dev_err(pcs->dev, "could not get resource\n"); ++ return -ENODEV; ++ } ++ ++ pcs->gedge_flag_res = devm_request_mem_region(pcs->dev, res->start, ++ resource_size(res), ++ DRIVER_NAME); ++ if (!pcs->gedge_flag_res) { ++ dev_err(pcs->dev, "could not get mem_region\n"); ++ return -EBUSY; ++ } ++ ++ pcs->gedge_flag_size = resource_size(pcs->gedge_flag_res); ++ pcs->gedge_flag_base = devm_ioremap(pcs->dev, pcs->gedge_flag_res->start, ++ pcs->gedge_flag_size); ++ if (!pcs->gedge_flag_base) { ++ dev_err(pcs->dev, "could not ioremap\n"); ++ return -ENODEV; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ if (!res) { ++ dev_err(pcs->dev, "could not get resource\n"); ++ return -ENODEV; ++ } ++ ++ pcs->gpio_base = ioremap(res->start, resource_size(res)); ++ if (!pcs->gpio_base) { ++ dev_err(pcs->dev, "could not ioremap\n"); ++ return -ENODEV; ++ } ++ ++ platform_set_drvdata(pdev, pcs); ++ ++ switch (pcs->width) { ++ case 8: ++ pcs->read = pcs_readb; ++ pcs->write = pcs_writeb; ++ break; ++ case 16: ++ pcs->read = pcs_readw; ++ pcs->write = pcs_writew; ++ break; ++ case 32: ++ pcs->read = pcs_readl; ++ pcs->write = pcs_writel; ++ break; ++ default: ++ break; ++ } ++ ++ pcs->desc.name = DRIVER_NAME; ++ pcs->desc.pctlops = &pcs_pinctrl_ops; ++ pcs->desc.pmxops = &pcs_pinmux_ops; ++ if (PCS_HAS_PINCONF(pcs)) ++ pcs->desc.confops = &pcs_pinconf_ops; ++ pcs->desc.owner = THIS_MODULE; ++ ++ for (i = 4, base = pcs->base + 4; i < pcs->size; i += 4, base += 4) { ++ regval = pcs->read((void __iomem *)base); ++ regval |= (1 << EDGE_CLEAR); ++ regval &= ~(1 << EDGE_FALL_EN); ++ regval &= ~(1 << EDGE_RISE_EN); ++ pcs->write(regval, (void __iomem *)base); ++ } ++ ++ ret = pcs_allocate_pin_table(pcs); ++ if (ret < 0) ++ goto free; ++ ++ ret = devm_pinctrl_register_and_init(pcs->dev, &pcs->desc, pcs, &pcs->pctl); ++ if (ret) { ++ dev_err(pcs->dev, "could not 
register single pinctrl driver\n"); ++ goto free; ++ } ++ ++ ret = pcs_add_gpio_func(np, pcs); ++ if (ret < 0) ++ goto free; ++ ++ pcs->socdata.irq = irq_of_parse_and_map(np, 0); ++ if (pcs->socdata.irq) ++ pcs->flags |= PCS_FEAT_IRQ; ++ ++ /* We still need auxdata for some omaps for PRM interrupts */ ++ pdata = dev_get_platdata(&pdev->dev); ++ if (pdata) { ++ if (pdata->rearm) ++ pcs->socdata.rearm = pdata->rearm; ++ if (pdata->irq) { ++ pcs->socdata.irq = pdata->irq; ++ pcs->flags |= PCS_FEAT_IRQ; ++ } ++ } ++ ++ if (PCS_HAS_IRQ(pcs)) { ++ ret = pcs_irq_init_chained_handler(pcs, np); ++ if (ret < 0) ++ dev_warn(pcs->dev, "initialized with no interrupts\n"); ++ } ++ ++ dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size); ++ ++ dev_pm_set_wake_irq(&pdev->dev, pcs->socdata.irq); ++ device_init_wakeup(&pdev->dev, true); ++ ++#ifdef CONFIG_PM_SLEEP ++ pinctrl_pcs = pcs; ++ register_syscore_ops(&pinctrl_syscore_ops); ++#endif ++ ++ ret = pinctrl_enable(pcs->pctl); ++ if (ret) ++ goto free; ++ ++ return 0; ++free: ++ pcs_free_resources(pcs); ++ ++ return ret; ++} ++ ++static int pcs_remove(struct platform_device *pdev) ++{ ++ struct pcs_device *pcs = platform_get_drvdata(pdev); ++ ++ if (!pcs) ++ return 0; ++ ++ pcs_free_resources(pcs); ++ clk_disable_unprepare(psc_clk); ++ reset_control_assert(psc_rst); ++ ++ return 0; ++} ++ ++static const struct pcs_soc_data pinctrl_spacemit_k1x = { ++ .flags = PCS_QUIRK_SHARED_IRQ | PCS_FEAT_PINCONF, ++ .irq_enable_mask = (1 << EDGE_CLEAR), /* WAKEUPENABLE */ ++ .irq_status_mask = (1 << EDGE_CLEAR), /* WAKEUPENABLE */ ++}; ++ ++static const struct of_device_id pcs_of_match[] = { ++ { .compatible = "pinctrl-spacemit-k1x", .data = &pinctrl_spacemit_k1x }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, pcs_of_match); ++ ++static struct platform_driver pcs_driver = { ++ .probe = pcs_probe, ++ .remove = pcs_remove, ++ .driver = { ++ .name = DRIVER_NAME, ++ .of_match_table = pcs_of_match, ++ }, ++}; ++ ++static int __init pcs_driver_init(void) ++{ ++ return platform_driver_register(&pcs_driver); ++} ++postcore_initcall(pcs_driver_init); ++ ++static void __exit pcs_driver_exit(void) ++{ ++ platform_driver_unregister(&pcs_driver); ++} ++module_exit(pcs_driver_exit); +diff --git a/drivers/pinctrl/pinctrl-spacemit-p1.c b/drivers/pinctrl/pinctrl-spacemit-p1.c +new file mode 100644 +index 000000000000..c316ef5784e7 +--- /dev/null ++++ b/drivers/pinctrl/pinctrl-spacemit-p1.c +@@ -0,0 +1,631 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Pinctrl driver for Spacemit P1 ++ * ++ * Copyright (c) 2023, Spacemit Co., Ltd ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "core.h" ++#include "pinctrl-utils.h" ++#include "pinmux.h" ++ ++struct pin_config_desc { ++ unsigned int pin_id; ++ /* input config desc */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } input; ++ ++ /* output config desc */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } output; ++ ++ /* pull-down desc */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } pup; ++ ++ /* deb */ ++ struct { ++ unsigned char reg; ++ unsigned char timemsk; ++ ++ struct { ++ unsigned char msk; ++ } en; ++ } deb; ++ ++ /* OD */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } od; ++ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } itype; ++}; ++ ++/* pinctrl */ ++struct pin_func_desc { ++ const 
char *name; ++ unsigned char pin_id; ++ unsigned char func_reg; ++ unsigned char func_mask; ++ unsigned char en_val; ++ unsigned char ha_sub; ++ unsigned char sub_reg; ++ unsigned char sub_mask; ++ unsigned char sube_val; ++}; ++ ++/* pinctrl: match data */ ++struct pinctrl_match_data { ++ int nr_pin_mux; ++ const char **pinmux_funcs; ++ int nr_pin_fuc_desc; ++ const struct pin_func_desc *pinfunc_desc; ++ int nr_pin_conf_desc; ++ const struct pin_config_desc *pinconf_desc; ++ const char *name; ++}; ++ ++struct spm_p1_pctl { ++ struct gpio_chip chip; ++ struct regmap *regmap; ++ struct pinctrl_dev *pctldev; ++ struct device *dev; ++ struct pinctrl_desc pinctrl_desc; ++ int funcdesc_nums; ++ int confdesc_nums; ++ const struct pin_func_desc *func_desc; ++ const struct pin_config_desc *config_desc; ++ const char *name; ++}; ++ ++/* gpio set */ ++const char *spm_p1_pinmux_functions[] = { ++ "gpioin", "gpioout", "exten", "pwrctrl", ++ "sleep", "nreset", "adcin" ++}; ++ ++#define SPM8XX_DESC_PIN_FUNC_COM(_pin_id, _match, _ereg, _emask, \ ++ _enval, _hsub, _subreg, _submask, _subenval \ ++ ) \ ++ { \ ++ .name = (_match), \ ++ .pin_id = (_pin_id), \ ++ .func_reg = (_ereg), \ ++ .func_mask = (_emask), \ ++ .en_val = (_enval), \ ++ .ha_sub = (_hsub), \ ++ .sub_reg = (_subreg), \ ++ .sub_mask = (_submask), \ ++ .sube_val = (_subenval), \ ++ } ++ ++static const struct pin_func_desc spm_p1_pinfunc_desc[] = { ++ /* PIN0 gpioin */ ++ SPM8XX_DESC_PIN_FUNC_COM(0, "gpioin", 0x8, 0x3, 0, 0, 0, 0, 0), ++ /* PIN0 gpioout*/ ++ SPM8XX_DESC_PIN_FUNC_COM(0, "gpioout", 0x8, 0x3, 1, 0, 0, 0, 0), ++ /* PIN0 exten */ ++ SPM8XX_DESC_PIN_FUNC_COM(0, "exten", 0x8, 0x3, 0x3, 1, 0xa, 0x7, 0x0), ++ /* PIN0 pwrctrl */ ++ SPM8XX_DESC_PIN_FUNC_COM(0, "pwrctrl", 0x8, 0x3, 0x3, 1, 0xa, 0x7, 0x1), ++ /* PIN0 sleep */ ++ SPM8XX_DESC_PIN_FUNC_COM(0, "sleep", 0x8, 0x3, 0x3, 1, 0xa, 0x7, 0x2), ++ /* PIN0 nreset */ ++ SPM8XX_DESC_PIN_FUNC_COM(0, "nreset", 0x8, 0x3, 0x3, 1, 0xa, 0x7, 0x3), ++ /* PIN0 adcin */ ++ SPM8XX_DESC_PIN_FUNC_COM(0, "adcin", 0x8, 0x3, 0x3, 1, 0xa, 0x7, 0x4), ++ /* PIN1 gpioin */ ++ SPM8XX_DESC_PIN_FUNC_COM(1, "gpioin", 0x8, 0xc, 0, 0, 0, 0, 0), ++ /* PIN1 gpioout*/ ++ SPM8XX_DESC_PIN_FUNC_COM(1, "gpioout", 0x8, 0xc, 1, 0, 0, 0, 0), ++ /* PIN1 exten */ ++ SPM8XX_DESC_PIN_FUNC_COM(1, "exten", 0x8, 0xc, 0x3, 1, 0xa, 0x38, 0x0), ++ /* PIN1 pwrctrl */ ++ SPM8XX_DESC_PIN_FUNC_COM(1, "pwrctrl", 0x8, 0xc, 0x3, 1, 0xa, 0x38, 0x1), ++ /* PIN1 sleep */ ++ SPM8XX_DESC_PIN_FUNC_COM(1, "sleep", 0x8, 0xc, 0x3, 1, 0xa, 0x38, 0x2), ++ /* PIN1 nreset */ ++ SPM8XX_DESC_PIN_FUNC_COM(1, "nreset", 0x8, 0xc, 0x3, 1, 0xa, 0x38, 0x3), ++ /* PIN1 adcin */ ++ SPM8XX_DESC_PIN_FUNC_COM(1, "adcin", 0x8, 0xc, 0x3, 1, 0xa, 0x38, 0x4), ++ /* PIN2 gpioin */ ++ SPM8XX_DESC_PIN_FUNC_COM(2, "gpioin", 0x8, 0x30, 0, 0, 0, 0, 0), ++ /* PIN2 gpioout*/ ++ SPM8XX_DESC_PIN_FUNC_COM(2, "gpioout", 0x8, 0x30, 1, 0, 0, 0, 0), ++ /* PIN2 exten */ ++ SPM8XX_DESC_PIN_FUNC_COM(2, "exten", 0x8, 0x30, 0x3, 1, 0xb, 0x7, 0x0), ++ /* PIN2 pwrctrl */ ++ SPM8XX_DESC_PIN_FUNC_COM(2, "pwrctrl", 0x8, 0x30, 0x3, 1, 0xb, 0x7, 0x1), ++ /* PIN2 sleep */ ++ SPM8XX_DESC_PIN_FUNC_COM(2, "sleep", 0x8, 0x30, 0x3, 1, 0xb, 0x7, 0x2), ++ /* PIN2 nreset */ ++ SPM8XX_DESC_PIN_FUNC_COM(2, "nreset", 0x8, 0x30, 0x3, 1, 0xb, 0x7, 0x3), ++ /* PIN2 adcin */ ++ SPM8XX_DESC_PIN_FUNC_COM(2, "adcin", 0x8, 0x30, 0x3, 1, 0xb, 0x7, 0x4), ++ /* PIN3 gpioin */ ++ SPM8XX_DESC_PIN_FUNC_COM(3, "gpioin", 0x9, 0x3, 0, 0, 0, 0, 0), ++ /* PIN3 gpioout*/ ++ SPM8XX_DESC_PIN_FUNC_COM(3, "gpioout", 0x9, 0x3, 1, 
0, 0, 0, 0), ++ /* PIN3 exten */ ++ SPM8XX_DESC_PIN_FUNC_COM(3, "exten", 0x9, 0x3, 0x3, 1, 0xb, 0x38, 0x0), ++ /* PIN3 pwrctrl */ ++ SPM8XX_DESC_PIN_FUNC_COM(3, "pwrctrl", 0x9, 0x3, 0x3, 1, 0xb, 0x38, 0x1), ++ /* PIN3 sleep */ ++ SPM8XX_DESC_PIN_FUNC_COM(3, "sleep", 0x9, 0x3, 0x3, 1, 0xb, 0x38, 0x2), ++ /* PIN3 nreset */ ++ SPM8XX_DESC_PIN_FUNC_COM(3, "nreset", 0x9, 0x3, 0x3, 1, 0xb, 0x38, 0x3), ++ /* PIN3 adcin */ ++ SPM8XX_DESC_PIN_FUNC_COM(3, "adcin", 0x9, 0x3, 0x3, 1, 0xb, 0x38, 0x4), ++ /* PIN4 gpioin */ ++ SPM8XX_DESC_PIN_FUNC_COM(4, "gpioin", 0x9, 0xc, 0, 0, 0, 0, 0), ++ /* PIN4 gpioout*/ ++ SPM8XX_DESC_PIN_FUNC_COM(4, "gpioout", 0x9, 0xc, 1, 0, 0, 0, 0), ++ /* PIN4 exten */ ++ SPM8XX_DESC_PIN_FUNC_COM(4, "exten", 0x9, 0xc, 0x3, 1, 0xc, 0x7, 0x0), ++ /* PIN4 pwrctrl */ ++ SPM8XX_DESC_PIN_FUNC_COM(4, "pwrctrl", 0x9, 0xc, 0x3, 1, 0xc, 0x7, 0x1), ++ /* PIN4 sleep */ ++ SPM8XX_DESC_PIN_FUNC_COM(4, "sleep", 0x9, 0xc, 0x3, 1, 0xc, 0x7, 0x2), ++ /* PIN4 nreset */ ++ SPM8XX_DESC_PIN_FUNC_COM(4, "nreset", 0x9, 0xc, 0x3, 1, 0xc, 0x7, 0x3), ++ /* PIN4 adcin */ ++ SPM8XX_DESC_PIN_FUNC_COM(4, "adcin", 0x9, 0xc, 0x3, 1, 0xc, 0x7, 0x4), ++ /* PIN5 gpioin */ ++ SPM8XX_DESC_PIN_FUNC_COM(5, "gpioin", 0x9, 0x30, 0, 0, 0, 0, 0), ++ /* PIN5 gpioout*/ ++ SPM8XX_DESC_PIN_FUNC_COM(5, "gpioout", 0x9, 0x30, 1, 0, 0, 0, 0), ++ /* PIN5 exten */ ++ SPM8XX_DESC_PIN_FUNC_COM(5, "exten", 0x9, 0x30, 0x3, 1, 0xc, 0x38, 0x0), ++ /* PIN5 pwrctrl */ ++ SPM8XX_DESC_PIN_FUNC_COM(5, "pwrctrl", 0x9, 0x30, 0x3, 1, 0xc, 0x38, 0x1), ++ /* PIN5 sleep */ ++ SPM8XX_DESC_PIN_FUNC_COM(5, "sleep", 0x9, 0x30, 0x3, 1, 0xc, 0x38, 0x2), ++ /* PIN5 nreset */ ++ SPM8XX_DESC_PIN_FUNC_COM(5, "nreset", 0x9, 0x30, 0x3, 1, 0xc, 0x38, 0x3), ++ /* PIN5 adcin */ ++ SPM8XX_DESC_PIN_FUNC_COM(5, "adcin", 0x9, 0x30, 0x3, 1, 0xc, 0x38, 0x4), ++}; ++ ++#define SPM8XX_DESC_PIN_CONFIG_COM(_pin_id, _ireg, _imsk, _oreg, _omsk, \ ++ _pureg, _pumsk, _debreg, _debtmsk, _debemsk, _odreg, _odmsk, \ ++ _itypereg, _itypemsk \ ++ ) \ ++ { \ ++ .pin_id = (_pin_id), \ ++ .input = { \ ++ .reg = (_ireg), \ ++ .msk = (_imsk), \ ++ }, \ ++ .output = { \ ++ .reg = (_oreg), \ ++ .msk = (_omsk), \ ++ }, \ ++ .pup = { \ ++ .reg = (_pureg), \ ++ .msk = (_pumsk), \ ++ }, \ ++ .deb = { \ ++ .reg = (_debreg), \ ++ .timemsk = (_debtmsk), \ ++ .en.msk = (_debemsk) \ ++ }, \ ++ .od = { \ ++ .reg = (_odreg), \ ++ .msk = (_odmsk), \ ++ }, \ ++ .itype = { \ ++ .reg = (_itypereg), \ ++ .msk = (_itypemsk), \ ++ }, \ ++ } ++ ++static const struct pin_config_desc spm_p1_pinconfig_desc[] = { ++ SPM8XX_DESC_PIN_CONFIG_COM(0, 0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x4, ++ 0xc0, 0x1, 0x5, 0x1, 0x6, 0x3), ++ SPM8XX_DESC_PIN_CONFIG_COM(1, 0x0, 0x2, 0x1, 0x2, 0x2, 0xC, 0x4, ++ 0xc0, 0x2, 0x5, 0x2, 0x6, 0xC), ++ SPM8XX_DESC_PIN_CONFIG_COM(2, 0x0, 0x4, 0x1, 0x4, 0x2, 0x30, 0x4, ++ 0xc0, 0x4, 0x5, 0x4, 0x6, 0x30), ++ SPM8XX_DESC_PIN_CONFIG_COM(3, 0x0, 0x8, 0x1, 0x8, 0x3, 0x3, 0x4, ++ 0xc0, 0x8, 0x5, 0x8, 0x7, 0x3), ++ SPM8XX_DESC_PIN_CONFIG_COM(4, 0x0, 0x10, 0x1, 0x10, 0x3, 0xc, 0x4, ++ 0xc0, 0x10, 0x5, 0x10, 0x7, 0xc), ++ SPM8XX_DESC_PIN_CONFIG_COM(5, 0x0, 0x20, 0x1, 0x20, 0x3, 0x30, 0x4, ++ 0xc0, 0x20, 0x5, 0x20, 0x7, 0x30), ++}; ++ ++static struct pinctrl_match_data spm_p1_pinctrl_match_data = { ++ .nr_pin_mux = ARRAY_SIZE(spm_p1_pinmux_functions), ++ .pinmux_funcs = spm_p1_pinmux_functions, ++ .nr_pin_fuc_desc = ARRAY_SIZE(spm_p1_pinfunc_desc), ++ .pinfunc_desc = spm_p1_pinfunc_desc, ++ .nr_pin_conf_desc = ARRAY_SIZE(spm_p1_pinconfig_desc), ++ .pinconf_desc = spm_p1_pinconfig_desc, ++ .name = 
"spm_p1", ++}; ++ ++static const struct pinctrl_ops spm_p1_gpio_pinctrl_ops = { ++ .get_groups_count = pinctrl_generic_get_group_count, ++ .get_group_name = pinctrl_generic_get_group_name, ++ .get_group_pins = pinctrl_generic_get_group_pins, ++ .dt_node_to_map = pinconf_generic_dt_node_to_map_group, ++ .dt_free_map = pinconf_generic_dt_free_map, ++}; ++ ++static int spm_p1_gpio_pinmux_set(struct pinctrl_dev *pctldev, unsigned int function, ++ unsigned int group) ++{ ++ int i, ret; ++ struct spm_p1_pctl *pctl = pinctrl_dev_get_drvdata(pctldev); ++ const char *funcname = pinmux_generic_get_function_name(pctldev, function); ++ ++ /* get the target desc */ ++ for (i = 0; i < pctl->funcdesc_nums; ++i) { ++ if (strcmp(funcname, pctl->func_desc[i].name) == 0 && group == ++ pctl->func_desc[i].pin_id) { ++ /* set the first */ ++ ret = regmap_update_bits(pctl->regmap, ++ pctl->func_desc[i].func_reg, ++ pctl->func_desc[i].func_mask, ++ pctl->func_desc[i].en_val ++ << (ffs(pctl->func_desc[i].func_mask) - 1)); ++ if (ret) { ++ dev_err(pctl->dev, "set PIN%d, function:%s, failed\n", ++ group, funcname); ++ return ret; ++ } ++ ++ /* set the next if it have */ ++ if (pctl->func_desc[i].ha_sub) { ++ ret = regmap_update_bits(pctl->regmap, ++ pctl->func_desc[i].sub_reg, ++ pctl->func_desc[i].sub_mask, ++ pctl->func_desc[i].sube_val ++ << (ffs(pctl->func_desc[i].sub_mask) - 1)); ++ if (ret) { ++ dev_err(pctl->dev, ++ "set PIN%d, function:%s, failed\n", ++ group, funcname); ++ return ret; ++ } ++ } ++ ++ break; ++ } ++ } ++ ++ if (i >= pctl->funcdesc_nums) { ++ dev_err(pctl->dev, "Unsupported PIN%d, function:%s\n", group, funcname); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int spm_p1_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, ++ struct pinctrl_gpio_range *range, ++ unsigned int offset, bool input) ++{ ++ int ret; ++ struct spm_p1_pctl *pctl = pinctrl_dev_get_drvdata(pctldev); ++ ++ if (strcmp(pctl->name, "spm_p1") == 0) ++ /* when input == true, it means that we should set this pin ++ * as gpioin, so we should pass function(0) to set_mux ++ */ ++ ret = spm_p1_gpio_pinmux_set(pctldev, !input, offset); ++ else ++ return -EINVAL; ++ ++ return ret; ++} ++ ++static const struct pinmux_ops spm_p1_gpio_pinmux_ops = { ++ .get_functions_count = pinmux_generic_get_function_count, ++ .get_function_name = pinmux_generic_get_function_name, ++ .get_function_groups = pinmux_generic_get_function_groups, ++ .set_mux = spm_p1_gpio_pinmux_set, ++ .gpio_set_direction = spm_p1_pmx_gpio_set_direction, ++ .strict = true, ++}; ++ ++static int spm_p1_gpio_get(struct gpio_chip *chip, unsigned int offset) ++{ ++ int ret; ++ unsigned int val; ++ struct spm_p1_pctl *pctl = gpiochip_get_data(chip); ++ ++ ret = regmap_read(pctl->regmap, pctl->config_desc[offset].input.reg, &val); ++ if (ret) { ++ dev_err(pctl->dev, "get PIN%d, direction failed\n", offset); ++ return ret; ++ } ++ ++ val = val & pctl->config_desc[offset].input.msk; ++ val >>= ffs(pctl->config_desc[offset].input.msk) - 1; ++ ++ return val; ++} ++ ++static int spm_p1_gpio_get_direction(struct gpio_chip *chip, ++ unsigned int offset) ++{ ++ int i, ret; ++ unsigned int val, direction = 0; ++ struct spm_p1_pctl *pctl = gpiochip_get_data(chip); ++ ++ /* read the function set register */ ++ for (i = 0; i < pctl->funcdesc_nums; ++i) { ++ if (offset == pctl->func_desc[i].pin_id) { ++ ret = regmap_read(pctl->regmap, pctl->func_desc[i].func_reg, &val); ++ if (ret) { ++ dev_err(pctl->dev, "get PIN%d, direction failed\n", offset); ++ return ret; ++ } ++ ++ direction 
= val & pctl->func_desc[i].func_mask; ++ direction >>= ffs(pctl->func_desc[i].func_mask) - 1; ++ ++ break; ++ } ++ } ++ ++ if (strcmp(pctl->name, "spm_p1") == 0) ++ return !direction; ++ else ++ return -EINVAL; ++} ++ ++static void spm_p1_gpio_set(struct gpio_chip *chip, unsigned int offset, ++ int value) ++{ ++ int ret; ++ struct spm_p1_pctl *pctl = gpiochip_get_data(chip); ++ ++ ret = regmap_update_bits(pctl->regmap, ++ pctl->config_desc[offset].output.reg, ++ pctl->config_desc[offset].output.msk, ++ value ? pctl->config_desc[offset].output.msk : 0); ++ if (ret) ++ dev_err(pctl->dev, "set PIN%d, val:%d, failed\n", offset, value); ++} ++ ++static int spm_p1_gpio_input(struct gpio_chip *chip, unsigned int offset) ++{ ++ /* set the gpio input */ ++ return pinctrl_gpio_direction_input(chip->base + offset); ++} ++ ++static int spm_p1_gpio_output(struct gpio_chip *chip, unsigned int offset, ++ int value) ++{ ++ /* drive the requested level first, then switch the pin to gpio output */ ++ spm_p1_gpio_set(chip, offset, value); ++ return pinctrl_gpio_direction_output(chip->base + offset); ++} ++ ++static int spm_p1_pin_pconf_get(struct pinctrl_dev *pctldev, unsigned int pin, ++ unsigned long *config) ++{ ++ /* Nothing to do for now */ ++ return 0; ++} ++ ++static int spm_p1_pin_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin, ++ unsigned long *configs, unsigned int num_configs) ++{ ++ unsigned int reg, msk, ret; ++ struct spm_p1_pctl *pctl = pinctrl_dev_get_drvdata(pctldev); ++ ++ while (num_configs) { ++ switch (pinconf_to_config_param(*configs)) { ++ case PIN_CONFIG_BIAS_DISABLE: ++ case PIN_CONFIG_BIAS_PULL_DOWN: ++ case PIN_CONFIG_BIAS_PULL_UP: ++ reg = pctl->config_desc[pin].pup.reg; ++ msk = pctl->config_desc[pin].pup.msk; ++ break; ++ case PIN_CONFIG_DRIVE_OPEN_DRAIN: ++ case PIN_CONFIG_DRIVE_PUSH_PULL: ++ case PIN_CONFIG_DRIVE_OPEN_SOURCE: ++ reg = pctl->config_desc[pin].od.reg; ++ msk = pctl->config_desc[pin].od.msk; ++ break; ++ case PIN_CONFIG_INPUT_DEBOUNCE: ++ reg = pctl->config_desc[pin].deb.reg; ++ msk = pctl->config_desc[pin].deb.timemsk; ++ break; ++ case PIN_CONFIG_INPUT_SCHMITT_ENABLE: ++ reg = pctl->config_desc[pin].deb.reg; ++ msk = pctl->config_desc[pin].deb.en.msk; ++ break; ++ case PIN_CONFIG_OUTPUT: ++ reg = pctl->config_desc[pin].output.reg; ++ msk = pctl->config_desc[pin].output.msk; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ ret = regmap_update_bits(pctl->regmap, reg, msk, ++ pinconf_to_config_argument(*configs) ++ << (ffs(msk) - 1)); ++ if (ret) { ++ dev_err(pctl->dev, "set reg:%x, msk:%x failed\n", reg, msk); ++ return -EINVAL; ++ } ++ ++configs; ++ --num_configs; ++ } ++ ++ return 0; ++} ++ ++static int spm_p1_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int group, ++ unsigned long *configs, unsigned int num_configs) ++{ ++ return spm_p1_pin_pconf_set(pctldev, group, configs, num_configs); ++} ++ ++static int spm_p1_pconf_group_get(struct pinctrl_dev *pctldev, unsigned int group, ++ unsigned long *config) ++{ ++ return spm_p1_pin_pconf_get(pctldev, group, config); ++} ++ ++static const struct pinconf_ops spm_p1_gpio_pinconf_ops = { ++ .is_generic = true, ++ .pin_config_get = spm_p1_pin_pconf_get, ++ .pin_config_set = spm_p1_pin_pconf_set, ++ .pin_config_group_get = spm_p1_pconf_group_get, ++ .pin_config_group_set = spm_p1_pconf_group_set, ++}; ++ ++static const struct of_device_id spm_p1_pinctrl_of_match[] = { ++ { .compatible = "spacemit,p1,pinctrl", .data = (void *)&spm_p1_pinctrl_match_data }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, spm_p1_pinctrl_of_match); ++ ++static int spm_p1_pinctrl_probe(struct platform_device *pdev) ++{
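++ /*
++ * Probe in short: pick up the match data, create one pin, one generic
++ * group and the generic functions for every PMIC pad (the pad count comes
++ * from the "spm_pmic,npins" DT property and the names are generated as
++ * PIN0..PINn), then expose the same pads as a regmap-backed GPIO chip.
++ */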
++ int i, res; ++ struct spm_p1_pctl *pctl; ++ unsigned int npins; ++ const char **pin_names; ++ unsigned int *pin_nums; ++ struct pinctrl_pin_desc *pins; ++ const struct of_device_id *of_id; ++ struct spacemit_pmic *pmic = dev_get_drvdata(pdev->dev.parent); ++ struct pinctrl_match_data *match_data; ++ ++ of_id = of_match_device(spm_p1_pinctrl_of_match, &pdev->dev); ++ if (!of_id) { ++ pr_err("Unable to match OF ID\n"); ++ return -ENODEV; ++ } ++ ++ match_data = (struct pinctrl_match_data *)of_id->data; ++ ++ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); ++ if (!pctl) ++ return -ENOMEM; ++ ++ pctl->name = match_data->name; ++ pctl->dev = &pdev->dev; ++ pctl->regmap = pmic->regmap; ++ pctl->func_desc = match_data->pinfunc_desc; ++ pctl->funcdesc_nums = match_data->nr_pin_fuc_desc; ++ pctl->config_desc = match_data->pinconf_desc; ++ pctl->confdesc_nums = match_data->nr_pin_conf_desc; ++ dev_set_drvdata(&pdev->dev, pctl); ++ ++ if (of_property_read_u32(pdev->dev.of_node, "spm_pmic,npins", &npins)) ++ return dev_err_probe(&pdev->dev, -EINVAL, "spm_pmic,npins property not found\n"); ++ ++ pins = devm_kmalloc_array(&pdev->dev, npins, sizeof(pins[0]), ++ GFP_KERNEL); ++ pin_names = devm_kmalloc_array(&pdev->dev, npins, sizeof(pin_names[0]), ++ GFP_KERNEL); ++ pin_nums = devm_kmalloc_array(&pdev->dev, npins, sizeof(pin_nums[0]), ++ GFP_KERNEL); ++ if (!pins || !pin_names || !pin_nums) ++ return -ENOMEM; ++ ++ for (i = 0; i < npins; i++) { ++ pins[i].number = i; ++ pins[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "PIN%u", i); ++ pins[i].drv_data = pctl; ++ pin_names[i] = pins[i].name; ++ pin_nums[i] = i; ++ } ++ ++ pctl->pinctrl_desc.name = dev_name(pctl->dev); ++ pctl->pinctrl_desc.pins = pins; ++ pctl->pinctrl_desc.npins = npins; ++ pctl->pinctrl_desc.pctlops = &spm_p1_gpio_pinctrl_ops; ++ pctl->pinctrl_desc.pmxops = &spm_p1_gpio_pinmux_ops; ++ pctl->pinctrl_desc.confops = &spm_p1_gpio_pinconf_ops; ++ ++ pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pctl->pinctrl_desc, pctl); ++ if (IS_ERR(pctl->pctldev)) ++ return dev_err_probe(&pdev->dev, PTR_ERR(pctl->pctldev), ++ "Failed to register pinctrl device.\n"); ++ ++ for (i = 0; i < npins; i++) { ++ res = pinctrl_generic_add_group(pctl->pctldev, pins[i].name, ++ pin_nums + i, 1, pctl); ++ if (res < 0) ++ return dev_err_probe(pctl->dev, res, ++ "Failed to register group"); ++ } ++ ++ for (i = 0; i < match_data->nr_pin_mux; ++i) { ++ res = pinmux_generic_add_function(pctl->pctldev, match_data->pinmux_funcs[i], ++ pin_names, npins, pctl); ++ if (res < 0) ++ return dev_err_probe(pctl->dev, res, "Failed to register function."); ++ } ++ ++ pctl->chip.base = -1; ++ pctl->chip.can_sleep = true; ++ pctl->chip.request = gpiochip_generic_request; ++ pctl->chip.free = gpiochip_generic_free; ++ pctl->chip.parent = &pdev->dev; ++ pctl->chip.label = dev_name(&pdev->dev); ++ pctl->chip.owner = THIS_MODULE; ++ pctl->chip.get = spm_p1_gpio_get; ++ pctl->chip.get_direction = spm_p1_gpio_get_direction; ++ pctl->chip.set = spm_p1_gpio_set; ++ pctl->chip.direction_input = spm_p1_gpio_input; ++ pctl->chip.direction_output = spm_p1_gpio_output; ++ ++ pctl->chip.ngpio = pctl->pinctrl_desc.npins; ++ ++ res = devm_gpiochip_add_data(&pdev->dev, &pctl->chip, pctl); ++ if (res) { ++ dev_err(&pdev->dev, "Failed to register GPIO chip\n"); ++ return res; ++ } ++ ++ return 0; ++} ++ ++static struct platform_driver spm_p1_pinctrl_driver = { ++ .probe = spm_p1_pinctrl_probe, ++ .driver = { ++ .name = "spm-p1-pinctrl", ++ .of_match_table = 
spm_p1_pinctrl_of_match, ++ }, ++}; ++module_platform_driver(spm_p1_pinctrl_driver); ++ ++MODULE_DESCRIPTION("Spacemit P1 pinctrl driver"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/pinctrl-th1520.c b/drivers/pinctrl/pinctrl-th1520.c new file mode 100644 index 000000000000..2247ace7a7c7 @@ -447503,11 +491194,812 @@ index 000000000000..f3a30f0275b1 +ssize_t pinmux_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size); +#endif +diff --git a/drivers/pinctrl/ultrarisc/Kconfig b/drivers/pinctrl/ultrarisc/Kconfig +new file mode 100644 +index 000000000000..fb7144f5502e +--- /dev/null ++++ b/drivers/pinctrl/ultrarisc/Kconfig +@@ -0,0 +1,23 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++config PINCTRL_ULTRARISC ++ bool ++ depends on OF ++ select PINMUX ++ select GENERIC_PINCTRL_GROUPS ++ select GENERIC_PINCONF ++ select GENERIC_PINMUX_FUNCTIONS ++ select GPIOLIB ++ select IRQ_DOMAIN_HIERARCHY ++ select MFD_SYSCON ++ ++config PINCTRL_ULTRARISC_DP1000 ++ tristate "Pinctrl driver of UltraRISC DP1000" ++ select PINCTRL_ULTRARISC ++ depends on OF && HAS_IOMEM ++ help ++ This driver configures the UltraRISC DP1000 SoC's pinctrl subsystem. ++ ++ This option is only visible when device-tree support (`OF`) is enabled ++ and physical memory-mapped I/O regions are available (`HAS_IOMEM`), ++ and it automatically selects the core `Pinctrl ULTRARISC` driver. +diff --git a/drivers/pinctrl/ultrarisc/Makefile b/drivers/pinctrl/ultrarisc/Makefile +new file mode 100644 +index 000000000000..78f03c450e66 +--- /dev/null ++++ b/drivers/pinctrl/ultrarisc/Makefile +@@ -0,0 +1,4 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++obj-$(CONFIG_PINCTRL_ULTRARISC) += pinctrl-ultrarisc.o ++obj-$(CONFIG_PINCTRL_ULTRARISC_DP1000) += pinctrl-ultrarisc-dp1000.o +diff --git a/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc-dp1000.c b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc-dp1000.c +new file mode 100644 +index 000000000000..78970c154018 +--- /dev/null ++++ b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc-dp1000.c +@@ -0,0 +1,122 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* UltraRISC DP1000 pinctrl driver ++ * ++ * Copyright(C) 2025 UltraRISC Technology Co., Ltd. 
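++ *
++ * This file only carries the DP1000 specifics: the pin list and the
++ * per-port function/config register offsets in ur_dp1000_match_data. The
++ * pinctrl, pinmux and pinconf callbacks themselves are implemented by the
++ * shared pinctrl-ultrarisc core referenced through ur_pinctrl_probe() and
++ * ur_pinctrl_remove() below.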
++ * ++ * Author: wangjia ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../pinctrl-utils.h" ++#include "../pinmux.h" ++#include "../core.h" ++#include "../devicetree.h" ++ ++#include "pinctrl-ultrarisc.h" ++ ++static const struct pinctrl_pin_desc ur_dp1000_pins[] = { ++ // PA ++ PINCTRL_PIN(0, "PA0"), ++ PINCTRL_PIN(1, "PA1"), ++ PINCTRL_PIN(2, "PA2"), ++ PINCTRL_PIN(3, "PA3"), ++ PINCTRL_PIN(4, "PA4"), ++ PINCTRL_PIN(5, "PA5"), ++ PINCTRL_PIN(6, "PA6"), ++ PINCTRL_PIN(7, "PA7"), ++ PINCTRL_PIN(8, "PA8"), ++ PINCTRL_PIN(9, "PA9"), ++ PINCTRL_PIN(10, "PA10"), ++ PINCTRL_PIN(11, "PA11"), ++ PINCTRL_PIN(12, "PA12"), ++ PINCTRL_PIN(13, "PA13"), ++ PINCTRL_PIN(14, "PA14"), ++ PINCTRL_PIN(15, "PA15"), ++ // PB ++ PINCTRL_PIN(16, "PB0"), ++ PINCTRL_PIN(17, "PB1"), ++ PINCTRL_PIN(18, "PB2"), ++ PINCTRL_PIN(19, "PB3"), ++ PINCTRL_PIN(20, "PB4"), ++ PINCTRL_PIN(21, "PB5"), ++ PINCTRL_PIN(22, "PB6"), ++ PINCTRL_PIN(23, "PB7"), ++ // PC ++ PINCTRL_PIN(24, "PC0"), ++ PINCTRL_PIN(25, "PC1"), ++ PINCTRL_PIN(26, "PC2"), ++ PINCTRL_PIN(27, "PC3"), ++ PINCTRL_PIN(28, "PC4"), ++ PINCTRL_PIN(29, "PC5"), ++ PINCTRL_PIN(30, "PC6"), ++ PINCTRL_PIN(31, "PC7"), ++ // PD ++ PINCTRL_PIN(32, "PD0"), ++ PINCTRL_PIN(33, "PD1"), ++ PINCTRL_PIN(34, "PD2"), ++ PINCTRL_PIN(35, "PD3"), ++ PINCTRL_PIN(36, "PD4"), ++ PINCTRL_PIN(37, "PD5"), ++ PINCTRL_PIN(38, "PD6"), ++ PINCTRL_PIN(39, "PD7"), ++ // LPC ++ PINCTRL_PIN(40, "LPC0"), ++ PINCTRL_PIN(41, "LPC1"), ++ PINCTRL_PIN(42, "LPC2"), ++ PINCTRL_PIN(43, "LPC3"), ++ PINCTRL_PIN(44, "LPC4"), ++ PINCTRL_PIN(45, "LPC5"), ++ PINCTRL_PIN(46, "LPC6"), ++ PINCTRL_PIN(47, "LPC7"), ++ PINCTRL_PIN(48, "LPC8"), ++ PINCTRL_PIN(49, "LPC9"), ++ PINCTRL_PIN(50, "LPC10"), ++ PINCTRL_PIN(51, "LPC11"), ++ PINCTRL_PIN(52, "LPC12"), ++}; ++ ++static struct ur_pinctrl_match_data ur_dp1000_match_data = { ++ .pins = ur_dp1000_pins, ++ .npins = ARRAY_SIZE(ur_dp1000_pins), ++ .offset = 0x2c0, ++ .ports = { ++ {"A", 16, 0x2c0, 0x310}, ++ {"B", 8, 0x2c4, 0x318}, ++ {"C", 8, 0x2c8, 0x31c}, ++ {"D", 8, 0x2cc, 0x320}, ++ {"LPC", 13, 0x2d0, 0x324}, ++ }, ++}; ++ ++enum ur_dp1000_port_list { ++ PORT_A = 0, ++ PORT_B, ++ PORT_C, ++ PORT_D, ++ PORT_LPC ++}; ++ ++ ++static const struct of_device_id ur_pinctrl_of_match[] = { ++ { .compatible = "ultrarisc,dp1000-pinctrl", .data = &ur_dp1000_match_data, }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, ur_pinctrl_of_match); ++ ++static struct platform_driver ur_pinctrl_driver = { ++ .driver = { ++ .name = "ultrarisc-pinctrl-dp1000", ++ .of_match_table = ur_pinctrl_of_match, ++ }, ++ .probe = ur_pinctrl_probe, ++ .remove = ur_pinctrl_remove, ++}; ++ ++module_platform_driver(ur_pinctrl_driver); +diff --git a/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c +new file mode 100644 +index 000000000000..7caae71a0ae8 +--- /dev/null ++++ b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c +@@ -0,0 +1,499 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* UltraRISC pinctrl driver ++ * ++ * Copyright(C) 2025 UltraRISC Technology Co., Ltd. 
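++ *
++ * Common part of the UltraRISC pinctrl drivers: each DT subnode is turned
++ * into one generic group and one generic function named after the subnode,
++ * and every entry of its pinmux/pinconf properties is parsed as a
++ * <port pin value> triplet that is applied through the port descriptors in
++ * the SoC match data.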
++ * ++ * Author: wangjia ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../pinctrl-utils.h" ++#include "../pinmux.h" ++#include "../core.h" ++#include "../devicetree.h" ++ ++#include "pinctrl-ultrarisc.h" ++ ++static int ur_pin_to_desc(struct pinctrl_dev *pctldev, struct ur_pin_val *pin_val) ++{ ++ int index = 0; ++ struct ur_pinctrl *ur_pinctrl = pinctrl_dev_get_drvdata(pctldev); ++ const struct ur_pinctrl_match_data *ur_match_data = ur_pinctrl->match_data; ++ ++ for (int i = 0; i < pin_val->port; i++) ++ index += ur_match_data->ports[i].npins; ++ index += pin_val->pin; ++ dev_dbg(pctldev->dev, "port %d pin %d index %d\n", pin_val->port, pin_val->pin, index); ++ return index; ++} ++ ++static int ur_subnode_to_pin(struct pinctrl_dev *pctldev, ++ const char *name, ++ enum pinctrl_map_type type, ++ struct device_node *np, ++ int **pins, ++ struct ur_pin_val **pin_val, ++ int *pin_num) ++{ ++ struct ur_pin_val *pin_vals; ++ int rows; ++ int ret = -EINVAL; ++ int *group_pins; ++ const char **pgnames; ++ ++ dev_dbg(pctldev->dev, "pinctrl node %s\n", np->name); ++ rows = pinctrl_count_index_with_args(np, name); ++ if (rows < 0) { ++ dev_err(pctldev->dev, "%s count is invalid %d\n", name, rows); ++ return rows; ++ } ++ ++ pin_vals = devm_kcalloc(pctldev->dev, rows, sizeof(*pin_vals), GFP_KERNEL); ++ if (!pin_vals) ++ return -ENOMEM; ++ ++ group_pins = devm_kcalloc(pctldev->dev, rows, sizeof(*group_pins), GFP_KERNEL); ++ if (!group_pins) { ++ ret = -ENOMEM; ++ goto free_pin_vals; ++ } ++ ++ pgnames = devm_kzalloc(pctldev->dev, sizeof(*pgnames), GFP_KERNEL); ++ if (!pgnames) { ++ ret = -ENOMEM; ++ goto free_pins; ++ } ++ ++ for (int i = 0; i < rows; i++) { ++ struct of_phandle_args pin_args; ++ ++ ret = pinctrl_parse_index_with_args(np, name, i, &pin_args); ++ if (ret) { ++ dev_err(pctldev->dev, "parse args of %s index %d failed\n", name, i); ++ goto free_pgnames; ++ } ++ ++ if (pin_args.args_count < 3) { ++ dev_err(pctldev->dev, "invalid args_count(%d) of %s index %d/%d\n", ++ pin_args.args_count, name, i, rows); ++ ret = -EINVAL; ++ goto free_pgnames; ++ } ++ pin_vals[i].port = pin_args.args[0]; ++ pin_vals[i].pin = pin_args.args[1]; ++ pin_vals[i].mode = pin_args.args[2]; ++ ++ dev_dbg(pctldev->dev, "found a pinctrl: port=%d pin=%d val=0x%x\n", ++ pin_vals[i].port, pin_vals[i].pin, pin_vals[i].mode); ++ ++ group_pins[i] = ur_pin_to_desc(pctldev, &pin_vals[i]); ++ } ++ ++ dev_dbg(pctldev->dev, "get an pinmux of %s\n", np->name); ++ ++ ret = pinctrl_generic_add_group(pctldev, np->name, group_pins, rows, pin_vals); ++ if (ret < 0) { ++ dev_err(pctldev->dev, "add group %s failed\n", np->name); ++ goto free_pgnames; ++ } ++ ++ *pgnames = np->name; ++ ret = pinmux_generic_add_function(pctldev, np->name, pgnames, 1, NULL); ++ if (ret < 0) { ++ dev_err(pctldev->dev, "add function %s failed\n", np->name); ++ goto free_group; ++ } ++ ++ dev_dbg(pctldev->dev, "add group and function of %s\n", np->name); ++ ++ *pins = group_pins; ++ *pin_val = pin_vals; ++ *pin_num = rows; ++ ++ return 0; ++ ++free_group: ++ pinctrl_generic_remove_group(pctldev, ret); ++free_pgnames: ++ devm_kfree(pctldev->dev, pgnames); ++free_pins: ++ devm_kfree(pctldev->dev, group_pins); ++free_pin_vals: ++ devm_kfree(pctldev->dev, pin_vals); ++ return ret; ++} ++ ++static int ur_pinmux_to_map(struct pinctrl_dev *pctldev, ++ struct device_node *np, ++ struct pinctrl_map *map) ++{ ++ int ret; ++ int *pins; ++ struct ur_pin_val *pin_vals; ++ int 
pin_num; ++ ++ ret = ur_subnode_to_pin(pctldev, PINMUX_PROP_NAME, PIN_MAP_TYPE_MUX_GROUP, ++ np, &pins, &pin_vals, &pin_num); ++ if (ret) { ++ dev_err(pctldev->dev, "get pinmux data %s failed\n", np->name); ++ return ret; ++ } ++ ++ map->type = PIN_MAP_TYPE_MUX_GROUP; ++ map->data.mux.group = np->name; ++ map->data.mux.function = np->name; ++ ++ dev_dbg(pctldev->dev, "type=%d, mux.group=%s, mux.function=%s\n", ++ map->type, map->data.mux.group, map->data.mux.function); ++ ++ return 0; ++} ++ ++static int ur_pinconf_to_map(struct pinctrl_dev *pctldev, ++ struct device_node *np, ++ struct pinctrl_map *map) ++{ ++ int ret; ++ int *pins; ++ struct ur_pin_val *pin; ++ int pin_num; ++ ++ ret = ur_subnode_to_pin(pctldev, PINCONF_PROP_NAME, PIN_MAP_TYPE_CONFIGS_GROUP, ++ np, &pins, &pin, &pin_num); ++ if (ret) { ++ dev_err(pctldev->dev, "get pinconf data %s failed\n", np->name); ++ return ret; ++ } ++ ++ dev_dbg(pctldev->dev, "get an pinconf of %s\n", np->name); ++ map->type = PIN_MAP_TYPE_CONFIGS_GROUP; ++ map->data.configs.group_or_pin = np->name; ++ map->data.configs.configs = (unsigned long *)pin; ++ map->data.configs.num_configs = pin_num; ++ ++ dev_dbg(pctldev->dev, "type=%d, config.group_or_pin=%s, configs.num_config=%d\n", ++ map->type, map->data.configs.group_or_pin, map->data.configs.num_configs); ++ ++ return 0; ++} ++ ++static int ur_dt_node_to_map(struct pinctrl_dev *pctldev, ++ struct device_node *np, ++ struct pinctrl_map **map, ++ unsigned int *num_maps) ++{ ++ int ret; ++ bool mux_present = false, conf_present = false; ++ struct pinctrl_map *new_map; ++ unsigned int map_num = 0, prop_count = 0; ++ ++ if (of_property_present(np, PINMUX_PROP_NAME)) { ++ mux_present = true; ++ prop_count++; ++ } ++ if (of_property_present(np, PINCONF_PROP_NAME)) { ++ conf_present = true; ++ prop_count++; ++ } ++ ++ if (!prop_count) { ++ dev_err(pctldev->dev, "no pinctrl node(%d) in %s\n", prop_count, np->name); ++ return -EINVAL; ++ } ++ ++ new_map = devm_kmalloc_array(pctldev->dev, prop_count, sizeof(**map), GFP_KERNEL); ++ if (!new_map) ++ return -ENOMEM; ++ ++ *map = new_map; ++ if (mux_present) { ++ ret = ur_pinmux_to_map(pctldev, np, new_map); ++ if (!ret) { ++ new_map++; ++ map_num++; ++ } ++ } ++ if (conf_present) { ++ ret = ur_pinconf_to_map(pctldev, np, new_map); ++ if (!ret) ++ map_num++; ++ } ++ ++ if (!map_num) { ++ dev_err(pctldev->dev, "no pinctrl info of %s failed\n", np->name); ++ goto free_map; ++ } ++ *num_maps = map_num; ++ ++ return 0; ++ ++free_map: ++ devm_kfree(pctldev->dev, new_map); ++ return ret; ++} ++ ++static void ur_dt_free_map(struct pinctrl_dev *pctldev, ++ struct pinctrl_map *map, unsigned int num_maps) ++{ ++ if (map) ++ devm_kfree(pctldev->dev, map); ++} ++ ++static void ur_pin_dbg_show(struct pinctrl_dev *pctldev, ++ struct seq_file *s, unsigned int offset) ++{ ++ seq_printf(s, "%s", dev_name(pctldev->dev)); ++} ++ ++static const struct pinctrl_ops ur_pinctrl_ops = { ++ .get_groups_count = pinctrl_generic_get_group_count, ++ .get_group_name = pinctrl_generic_get_group_name, ++ .get_group_pins = pinctrl_generic_get_group_pins, ++ .dt_node_to_map = ur_dt_node_to_map, ++ .dt_free_map = ur_dt_free_map, ++ .pin_dbg_show = ur_pin_dbg_show, ++}; ++ ++static int ur_set_pin_mux(struct ur_pinctrl *pin_ctrl, struct ur_pin_val *pin_vals) ++{ ++ unsigned long flag; ++ void __iomem *reg; ++ u32 val; ++ const struct ur_port_desc *port; ++ ++ port = &pin_ctrl->match_data->ports[pin_vals->port]; ++ ++ reg = pin_ctrl->base + port->func_offset; ++ ++ 
raw_spin_lock_irqsave(&pin_ctrl->lock, flag);
++ val = readl_relaxed(reg);
++ val &= ~((UR_FUNC0 | UR_FUNC1) << pin_vals->pin);
++ val |= (pin_vals->mode << pin_vals->pin);
++ writel_relaxed(val, reg);
++ raw_spin_unlock_irqrestore(&pin_ctrl->lock, flag);
++
++ return 0;
++}
++
++static int ur_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
++ unsigned int group_selector)
++{
++ struct ur_pinctrl *ur_pinctrl = pinctrl_dev_get_drvdata(pctldev);
++ struct group_desc *ur_group;
++ struct ur_pin_val *pin_vals;
++
++ dev_dbg(pctldev->dev, "set mux: func_selector=%d, group_selector=%d\n",
++ func_selector, group_selector);
++ ur_group = pinctrl_generic_get_group(pctldev, group_selector);
++ if (!ur_group) {
++ dev_err(pctldev->dev, "get group %d failed\n", group_selector);
++ return -EINVAL;
++ }
++
++ dev_dbg(pctldev->dev, "get group %s, num_pins=%d\n", ur_group->name, ur_group->num_pins);
++ pin_vals = ur_group->data;
++ if (!pin_vals) {
++ dev_err(pctldev->dev, "data of %s is invalid\n", ur_group->name);
++ return -EINVAL;
++ }
++
++ for (int i = 0; i < ur_group->num_pins; i++)
++ ur_set_pin_mux(ur_pinctrl, &pin_vals[i]);
++
++ return 0;
++}
++
++static const struct pinmux_ops ur_pinmux_ops = {
++ .get_functions_count = pinmux_generic_get_function_count,
++ .get_function_name = pinmux_generic_get_function_name,
++ .get_function_groups = pinmux_generic_get_function_groups,
++ .set_mux = ur_set_mux,
++ .strict = true,
++};
++
++#define UR_CONF_BIT_PER_PIN (4)
++#define UR_CONF_PIN_PER_REG (32/UR_CONF_BIT_PER_PIN)
++static int ur_set_pin_conf(struct ur_pinctrl *pin_ctrl, struct ur_pin_val *pin_vals)
++{
++ const struct ur_port_desc *port_desc;
++ unsigned long flag;
++ void __iomem *reg;
++ u32 val, conf;
++
++ port_desc = &pin_ctrl->match_data->ports[pin_vals->port];
++ reg = pin_ctrl->base + port_desc->conf_offset;
++ dev_dbg(pin_ctrl->dev, "pinconf base=0x%llx, reg=0x%llx\n", (u64)pin_ctrl->base, (u64)reg);
++ reg += (pin_vals->pin / UR_CONF_PIN_PER_REG) * UR_CONF_BIT_PER_PIN;
++ dev_dbg(pin_ctrl->dev, "pinconf pin=0x%llx\n", (u64)reg);
++
++ conf = pin_vals->conf << ((pin_vals->pin % UR_CONF_PIN_PER_REG) * UR_CONF_BIT_PER_PIN);
++ dev_dbg(pin_ctrl->dev, "pinconf conf=0x%x\n", conf);
++
++ raw_spin_lock_irqsave(&pin_ctrl->lock, flag);
++ val = readl_relaxed(reg);
++ val &= ~(UR_BIAS_MASK << ((pin_vals->pin % UR_CONF_PIN_PER_REG) * UR_CONF_BIT_PER_PIN));
++ val |= conf;
++ writel_relaxed(val, reg);
++ raw_spin_unlock_irqrestore(&pin_ctrl->lock, flag);
++ dev_dbg(pin_ctrl->dev, "pinconf val=0x%x\n", val);
++
++ return 0;
++}
++
++static int ur_pin_config_get(struct pinctrl_dev *pctldev,
++ unsigned int pin,
++ unsigned long *config)
++{
++ dev_dbg(pctldev->dev, "%s(%d): pin=%d\n", __func__, __LINE__, pin);
++
++ return -EOPNOTSUPP;
++}
++
++static int ur_pin_config_set(struct pinctrl_dev *pctldev,
++ unsigned int pin,
++ unsigned long *configs,
++ unsigned int num_configs)
++{
++ struct ur_pin_val *pin_conf;
++ struct ur_pinctrl *ur_pinctrl = pinctrl_dev_get_drvdata(pctldev);
++
++ dev_dbg(pctldev->dev, "%s(%d): pin=%d, num_configs=%d\n",
++ __func__, __LINE__, pin, num_configs);
++ pin_conf = (struct ur_pin_val *)configs;
++ for (int i = 0; i < num_configs; i++) {
++ dev_dbg(pctldev->dev, "pinconf[%d], port=%d, pin=%d, conf=0x%x\n",
++ i, pin_conf[i].port, pin_conf[i].pin, pin_conf[i].conf);
++ ur_set_pin_conf(ur_pinctrl, &pin_conf[i]);
++ }
++ return 0;
++}
++
++static int ur_pin_config_group_get(struct pinctrl_dev *pctldev,
++ unsigned int selector,
++ unsigned long *config)
++{
++ 
dev_dbg(pctldev->dev, "%s(%d): selector=%d, config=0x%lx\n", ++ __func__, __LINE__, selector, *config); ++ return -EOPNOTSUPP; ++} ++ ++int ur_pin_config_group_set(struct pinctrl_dev *pctldev, ++ unsigned int selector, ++ unsigned long *configs, ++ unsigned int num_configs) ++{ ++ struct group_desc *ur_group; ++ struct ur_pin_val *pin_conf; ++ struct ur_pinctrl *ur_pinctrl = pinctrl_dev_get_drvdata(pctldev); ++ ++ dev_dbg(pctldev->dev, "%s(%d): selector=%d, num_configs=%d\n", ++ __func__, __LINE__, selector, num_configs); ++ ur_group = pinctrl_generic_get_group(pctldev, selector); ++ if (!ur_group) { ++ dev_err(pctldev->dev, "Cannot get group by selector %d\n", selector); ++ return -EINVAL; ++ } ++ ++ dev_dbg(pctldev->dev, "get pinconf group %s\n", ur_group->name); ++ pin_conf = (struct ur_pin_val *)configs; ++ for (int i = 0; i < num_configs; i++) { ++ dev_dbg(pctldev->dev, "pinconf[%d], port=%d, pin=%d, conf=0x%x\n", ++ i, pin_conf[i].port, pin_conf[i].pin, pin_conf[i].conf); ++ ur_set_pin_conf(ur_pinctrl, &pin_conf[i]); ++ } ++ return 0; ++} ++ ++static const struct pinconf_ops ur_pinconf_ops = { ++ .pin_config_get = ur_pin_config_get, ++ .pin_config_set = ur_pin_config_set, ++ .pin_config_group_get = ur_pin_config_group_get, ++ .pin_config_group_set = ur_pin_config_group_set, ++#ifdef CONFIG_GENERIC_PINCONF ++ .is_generic = true, ++#endif ++}; ++ ++int ur_pinctrl_probe(struct platform_device *pdev) ++{ ++ struct pinctrl_desc *ur_pinctrl_desc; ++ const struct ur_pinctrl_match_data *pins_data; ++ struct ur_pinctrl *ur_pinctrl; ++ int ret; ++ ++ pins_data = of_device_get_match_data(&pdev->dev); ++ if (!pins_data) ++ return -ENODEV; ++ ++ ur_pinctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*ur_pinctrl_desc), GFP_KERNEL); ++ if (!ur_pinctrl_desc) ++ return -ENOMEM; ++ ++ ur_pinctrl = devm_kzalloc(&pdev->dev, sizeof(*ur_pinctrl), GFP_KERNEL); ++ if (!ur_pinctrl) { ++ ret = -ENOMEM; ++ goto free_pinctrl_desc; ++ } ++ struct resource *res; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ dev_dbg(&pdev->dev, "iomem start=0x%llx\n", res->start); ++ ur_pinctrl->base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(ur_pinctrl->base)) { ++ dev_err(&pdev->dev, "get ioremap resource failed\n"); ++ ret = -EINVAL; ++ goto free_pinctrl_desc; ++ } ++ dev_dbg(&pdev->dev, "pinctrl base=0x%p\n", ur_pinctrl->base); ++ ur_pinctrl_desc->name = dev_name(&pdev->dev); ++ ur_pinctrl_desc->owner = THIS_MODULE; ++ ur_pinctrl_desc->pins = pins_data->pins; ++ ur_pinctrl_desc->npins = pins_data->npins; ++ ur_pinctrl_desc->pctlops = &ur_pinctrl_ops; ++ ur_pinctrl_desc->pmxops = &ur_pinmux_ops; ++ ur_pinctrl_desc->confops = &ur_pinconf_ops; ++ ++ ur_pinctrl->dev = &pdev->dev; ++ ur_pinctrl->match_data = pins_data; ++ ur_pinctrl->pctl_desc = ur_pinctrl_desc; ++ raw_spin_lock_init(&ur_pinctrl->lock); ++ mutex_init(&ur_pinctrl->mutex); ++ ++ ret = devm_pinctrl_register_and_init(&pdev->dev, ur_pinctrl_desc, ++ ur_pinctrl, &ur_pinctrl->pctl_dev); ++ if (ret) { ++ dev_err(&pdev->dev, "pinctrl register failed\n"); ++ goto free_pinctrl; ++ } ++ ++ platform_set_drvdata(pdev, ur_pinctrl); ++ ++ return pinctrl_enable(ur_pinctrl->pctl_dev); ++ ++free_pinctrl: ++ devm_kfree(&pdev->dev, ur_pinctrl); ++free_pinctrl_desc: ++ devm_kfree(&pdev->dev, ur_pinctrl_desc); ++ return ret; ++} ++ ++ ++int ur_pinctrl_remove(struct platform_device *pdev) ++{ ++ struct ur_pinctrl *ur_pinctrl = platform_get_drvdata(pdev); ++ ++ if (ur_pinctrl->pctl_dev) ++ devm_pinctrl_unregister(&pdev->dev, ur_pinctrl->pctl_dev); ++ ++ return 0; 
++} +diff --git a/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h +new file mode 100644 +index 000000000000..ff30ddca9564 +--- /dev/null ++++ b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h +@@ -0,0 +1,77 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* UltraRISC pinctrl driver ++ * ++ * Copyright(C) 2025 UltraRISC Technology Co., Ltd. ++ * ++ * Author: wangjia ++ */ ++ ++#ifndef __PINCTRL_ULTRARISC_H__ ++#define __PINCTRL_ULTRARISC_H__ ++ ++#include ++#include ++ ++#define PINMUX_PROP_NAME "pinctrl-pins" ++#define PINCONF_PROP_NAME "pinconf-pins" ++ ++struct ur_pin_conf { ++ u16 pull; ++ u16 drive; ++}; ++ ++struct ur_pin_val { ++ u32 port; ++ u32 pin; ++ union { ++ u32 mode; ++ u32 conf; ++ }; ++#define UR_FUNC_DEF 0 ++#define UR_FUNC0 1 ++#define UR_FUNC1 0x10000 ++ ++#define UR_BIAS_MASK 0x0000000F ++#define UR_PULL_MASK 0x0C ++#define UR_PULL_DIS 0 ++#define UR_PULL_UP 1 ++#define UR_PULL_DOWN 2 ++#define UR_DRIVE_MASK 0x03 ++}; ++ ++struct ur_port_desc { ++ char *name; ++ u32 npins; ++ u32 func_offset; ++ u32 conf_offset; ++}; ++ ++struct ur_pinctrl_match_data { ++ const struct pinctrl_pin_desc *pins; ++ u32 npins; ++ u32 offset; ++ struct ur_port_desc ports[]; ++}; ++ ++ ++struct ur_pinctrl { ++ struct device *dev; ++ struct pinctrl_dev *pctl_dev; ++ struct pinctrl_desc *pctl_desc; ++ void __iomem *base; ++ unsigned int ngroups; ++ const char **grp_names; ++ unsigned int nbanks; ++ const struct ur_pinctrl_match_data *match_data; ++ struct regmap *regmap; ++ raw_spinlock_t lock; ++ struct mutex mutex; ++ struct pinctrl_pin_desc *pins; ++ u32 npins; ++ u32 pkg; ++}; ++ ++int ur_pinctrl_probe(struct platform_device *pdev); ++int ur_pinctrl_remove(struct platform_device *pdev); ++ ++#endif +diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c +index 897cdd9c3aae..b8eaf6b9f8e1 100644 +--- a/drivers/platform/surface/surface_acpi_notify.c ++++ b/drivers/platform/surface/surface_acpi_notify.c +@@ -740,24 +740,26 @@ static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle) + { + struct acpi_handle_list dep_devices; + acpi_handle supplier = ACPI_HANDLE(&pdev->dev); +- acpi_status status; ++ bool ret = false; + int i; + + if (!acpi_has_method(handle, "_DEP")) + return false; + +- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { + san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { +- if (dep_devices.handles[i] == supplier) +- return true; ++ if (dep_devices.handles[i] == supplier) { ++ ret = true; ++ break; ++ } + } + +- return false; ++ acpi_handle_list_free(&dep_devices); ++ return ret; + } + + static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl, diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig -index 8ebcddf91f7b..1aa6ad3947f8 100644 +index da57f4a2bde0..a34c95ce1145 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig -@@ -637,6 +637,17 @@ config PWM_TEGRA +@@ -468,7 +468,7 @@ config PWM_PCA9685 + + config PWM_PXA + tristate "PXA PWM support" +- depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST ++ depends on ARCH_PXA || ARCH_MMP || SOC_SPACEMIT || COMPILE_TEST + depends on HAS_IOMEM + help + Generic PWM framework driver for PXA. 
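Stepping back to the UltraRISC pinctrl driver added above: ur_set_pin_mux() packs two function-select bits per pin into a port's function register, with UR_FUNC0 landing at bit `pin` and UR_FUNC1 at bit `pin + 16` once shifted. The standalone sketch below restates that read-modify-write so the mask arithmetic is easier to audit; the register layout is inferred from the UR_FUNC* definitions in pinctrl-ultrarisc.h rather than from a DP1000 datasheet, and ur_apply_mux() is a hypothetical helper, not part of the patch.

#include <stdint.h>

#define UR_FUNC0 0x00000001u /* low function-select bit, shifted by pin */
#define UR_FUNC1 0x00010000u /* high function-select bit, shifted by pin */

/* Compute the new function-register value for one pin (sketch only). */
static uint32_t ur_apply_mux(uint32_t reg_val, unsigned int pin, uint32_t mode)
{
	reg_val &= ~((UR_FUNC0 | UR_FUNC1) << pin); /* clear both select bits for this pin */
	reg_val |= mode << pin;                     /* mode is one of UR_FUNC_DEF/UR_FUNC0/UR_FUNC1 */
	return reg_val;
}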
+@@ -649,6 +649,17 @@ config PWM_TEGRA To compile this driver as a module, choose M here: the module will be called pwm-tegra. @@ -447526,10 +492018,10 @@ index 8ebcddf91f7b..1aa6ad3947f8 100644 tristate "ECAP PWM support" depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile -index c822389c2a24..d18c8c54b5b3 100644 +index 5d5b64c25b7f..b825630e7de6 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile -@@ -50,6 +50,7 @@ obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o +@@ -51,6 +51,7 @@ obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o @@ -447537,7 +492029,7 @@ index c822389c2a24..d18c8c54b5b3 100644 obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o obj-$(CONFIG_PWM_STI) += pwm-sti.o -@@ -59,6 +60,7 @@ obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o +@@ -60,6 +61,7 @@ obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o obj-$(CONFIG_PWM_SUNPLUS) += pwm-sunplus.o obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o @@ -447545,6 +492037,76 @@ index c822389c2a24..d18c8c54b5b3 100644 obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o obj-$(CONFIG_PWM_TWL) += pwm-twl.o +diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c +index 1e475ed10180..2a0b0d75f1bd 100644 +--- a/drivers/pwm/pwm-pxa.c ++++ b/drivers/pwm/pwm-pxa.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -53,6 +54,7 @@ struct pxa_pwm_chip { + struct device *dev; + + struct clk *clk; ++ struct reset_control *reset; + void __iomem *mmio_base; + }; + +@@ -150,6 +152,7 @@ static const struct of_device_id pwm_of_match[] = { + { .compatible = "marvell,pxa270-pwm", .data = &pwm_id_table[0]}, + { .compatible = "marvell,pxa168-pwm", .data = &pwm_id_table[0]}, + { .compatible = "marvell,pxa910-pwm", .data = &pwm_id_table[0]}, ++ { .compatible = "spacemit,k1-pwm", .data = &pwm_id_table[0]}, + { } + }; + MODULE_DEVICE_TABLE(of, pwm_of_match); +@@ -177,6 +180,10 @@ static int pwm_probe(struct platform_device *pdev) + if (IS_ERR(pc->clk)) + return PTR_ERR(pc->clk); + ++ pc->reset = devm_reset_control_get_optional(&pdev->dev, NULL); ++ if (!IS_ERR(pc->reset)) ++ reset_control_deassert(pc->reset); ++ + pc->chip.dev = &pdev->dev; + pc->chip.ops = &pxa_pwm_ops; + pc->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 
2 : 1; +@@ -187,18 +194,27 @@ static int pwm_probe(struct platform_device *pdev) + } + + pc->mmio_base = devm_platform_ioremap_resource(pdev, 0); +- if (IS_ERR(pc->mmio_base)) +- return PTR_ERR(pc->mmio_base); ++ if (IS_ERR(pc->mmio_base)) { ++ ret = PTR_ERR(pc->mmio_base); ++ goto err_rst; ++ } + + ret = devm_pwmchip_add(&pdev->dev, &pc->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); +- return ret; ++ goto err_rst; + } + + return 0; ++ ++err_rst: ++ if (!IS_ERR(pc->reset)) ++ reset_control_assert(pc->reset); ++ ++ return ret; + } + ++ + static struct platform_driver pwm_driver = { + .driver = { + .name = "pxa25x-pwm", diff --git a/drivers/pwm/pwm-sophgo.c b/drivers/pwm/pwm-sophgo.c new file mode 100644 index 000000000000..b6297175a5b7 @@ -448104,10 +492666,24 @@ index 000000000000..3246c3b98aad +MODULE_DESCRIPTION("XuanTie pwm driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig -index 965d4f0c18a6..4fb4d316c939 100644 +index 965d4f0c18a6..83e1977bc666 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig -@@ -1663,4 +1663,14 @@ config REGULATOR_QCOM_LABIBB +@@ -1335,6 +1335,13 @@ config REGULATOR_SM5703 + This driver provides support for voltage regulators of SM5703 + multi-function device. + ++config REGULATOR_SPACEMIT_P1 ++ tristate "Spacemit P1 PMIC Regulators" ++ depends on MFD_SPACEMIT_P1 ++ help ++ This driver provides support for the voltage regulators on the ++ Spacemit P1 PMIC. ++ + config REGULATOR_STM32_BOOSTER + tristate "STMicroelectronics STM32 BOOSTER" + depends on ARCH_STM32 || COMPILE_TEST +@@ -1663,4 +1670,14 @@ config REGULATOR_QCOM_LABIBB boost regulator and IBB can be used as a negative boost regulator for LCD display panel. @@ -448123,16 +492699,298 @@ index 965d4f0c18a6..4fb4d316c939 100644 + endif diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile -index 23074714a81a..6d44facb7dfc 100644 +index 23074714a81a..a4d3e1c3db6f 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile -@@ -195,5 +195,6 @@ obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o +@@ -156,6 +156,7 @@ obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o + obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o + obj-$(CONFIG_REGULATOR_SLG51000) += slg51000-regulator.o + obj-$(CONFIG_REGULATOR_SM5703) += sm5703-regulator.o ++obj-$(CONFIG_REGULATOR_SPACEMIT_P1) += spacemit-p1-regulator.o + obj-$(CONFIG_REGULATOR_STM32_BOOSTER) += stm32-booster.o + obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o + obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o +@@ -195,5 +196,6 @@ obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o +obj-$(CONFIG_REGULATOR_TH1520_AON) +=th1520-aon-regulator.o ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG +diff --git a/drivers/regulator/spacemit-p1-regulator.c b/drivers/regulator/spacemit-p1-regulator.c +new file mode 100644 +index 000000000000..4463190befdf +--- /dev/null ++++ b/drivers/regulator/spacemit-p1-regulator.c +@@ -0,0 +1,268 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Regulator driver for Spacemit P1 ++ * ++ * Copyright (c) 2023, SPACEMIT Co., Ltd ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* regulator: match data */ ++struct regulator_match_data { ++ int nr_desc; ++ int sleep_reg_offset; 
++ const struct regulator_desc *desc; ++ const char *name; ++}; ++ ++static struct regulator_match_data *match_data; ++ ++static int spm_p1_regulator_set_suspend_voltage(struct regulator_dev *rdev, int uV) ++{ ++ unsigned int reg; ++ ++ int sel = regulator_map_voltage_linear_range(rdev, uV, uV); ++ ++ if (sel < 0) ++ return -EINVAL; ++ ++ /* means that we will disable this vol in suspend */ ++ if (uV == rdev->constraints->max_uV) ++ /* BUCK will set 0xff to close the power */ ++ sel = rdev->desc->vsel_mask; ++ else if (uV == rdev->constraints->min_uV) ++ /* LDO will set zero to close the power */ ++ sel = 0; ++ ++ reg = rdev->desc->vsel_reg + match_data->sleep_reg_offset; ++ ++ return regmap_update_bits(rdev->regmap, reg, rdev->desc->vsel_mask, sel); ++} ++ ++static const struct regulator_ops pmic_dcdc_ldo_ops = { ++ .list_voltage = regulator_list_voltage_linear_range, ++ .map_voltage = regulator_map_voltage_linear_range, ++ .get_voltage_sel = regulator_get_voltage_sel_regmap, ++ .set_voltage_sel = regulator_set_voltage_sel_regmap, ++ .set_voltage_time_sel = regulator_set_voltage_time_sel, ++ .enable = regulator_enable_regmap, ++ .disable = regulator_disable_regmap, ++ .is_enabled = regulator_is_enabled_regmap, ++ .set_suspend_voltage = spm_p1_regulator_set_suspend_voltage, ++}; ++ ++static const struct regulator_ops pmic_switch_ops = { ++ .enable = regulator_enable_regmap, ++ .disable = regulator_disable_regmap, ++ .is_enabled = regulator_is_enabled_regmap, ++}; ++ ++static const struct linear_range spm_p1_buck_ranges[] = { ++ REGULATOR_LINEAR_RANGE(500000, 0x0, 0xaa, 5000), ++ REGULATOR_LINEAR_RANGE(1375000, 0xab, 0xff, 25000), ++}; ++ ++static const struct linear_range spm_p1_ldo_ranges[] = { ++ REGULATOR_LINEAR_RANGE(500000, 0xb, 0x7f, 25000), ++}; ++ ++/* common regulator defination */ ++#define SPM8XX_DESC_COMMON(_id, _match, _supply, _nv, _vr, _vm, _er, _em, _lr, _ops) \ ++ { \ ++ .name = (_match), \ ++ .supply_name = (_supply), \ ++ .of_match = of_match_ptr(_match), \ ++ .regulators_node = of_match_ptr("regulators"), \ ++ .ops = _ops, \ ++ .type = REGULATOR_VOLTAGE, \ ++ .id = (_id), \ ++ .n_voltages = (_nv), \ ++ .owner = THIS_MODULE, \ ++ .vsel_reg = (_vr), \ ++ .vsel_mask = (_vm), \ ++ .enable_reg = (_er), \ ++ .enable_mask = (_em), \ ++ .volt_table = NULL, \ ++ .linear_ranges = (_lr), \ ++ .n_linear_ranges = ARRAY_SIZE(_lr), \ ++ } ++ ++#define SPM8XX_DESC_SWITCH_COM(_id, _match, _supply, _ereg, _emask, \ ++ _enval, _disval, _ops) \ ++ { \ ++ .name = (_match), \ ++ .supply_name = (_supply), \ ++ .of_match = of_match_ptr(_match), \ ++ .regulators_node = of_match_ptr("regulators"), \ ++ .type = REGULATOR_VOLTAGE, \ ++ .id = (_id), \ ++ .enable_reg = (_ereg), \ ++ .enable_mask = (_emask), \ ++ .enable_val = (_enval), \ ++ .disable_val = (_disval), \ ++ .owner = THIS_MODULE, \ ++ .ops = _ops \ ++ } ++ ++/* regulator configuration */ ++#define SPM_P1_DESC(_id, _match, _supply, _nv, _vr, _vm, _er, _em, _lr) \ ++ SPM8XX_DESC_COMMON(_id, _match, _supply, _nv, _vr, _vm, _er, _em, _lr, \ ++ &pmic_dcdc_ldo_ops) ++ ++#define SPM_P1_DESC_SWITCH(_id, _match, _supply, _ereg, _emask) \ ++ SPM8XX_DESC_SWITCH_COM(_id, _match, _supply, _ereg, _emask, \ ++ 0, 0, &pmic_switch_ops) ++ ++static const struct regulator_desc spm_p1_reg[] = { ++ /* BUCK */ ++ SPM_P1_DESC(SPM_P1_ID_DCDC1, "DCDC_REG1", "vcc_sys", ++ 255, SPM_P1_BUCK1_VSEL_REG, SPM_P1_BUCK_VSEL_MASK, ++ SPM_P1_BUCK1_CTRL_REG, SMP8821_BUCK_EN_MASK, ++ spm_p1_buck_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_DCDC2, "DCDC_REG2", "vcc_sys", ++ 255, 
SPM_P1_BUCK2_VSEL_REG, SPM_P1_BUCK_VSEL_MASK, ++ SPM_P1_BUCK2_CTRL_REG, SMP8821_BUCK_EN_MASK, ++ spm_p1_buck_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_DCDC3, "DCDC_REG3", "vcc_sys", ++ 255, SPM_P1_BUCK3_VSEL_REG, SPM_P1_BUCK_VSEL_MASK, ++ SPM_P1_BUCK3_CTRL_REG, SMP8821_BUCK_EN_MASK, ++ spm_p1_buck_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_DCDC4, "DCDC_REG4", "vcc_sys", ++ 255, SPM_P1_BUCK4_VSEL_REG, SPM_P1_BUCK_VSEL_MASK, ++ SPM_P1_BUCK4_CTRL_REG, SMP8821_BUCK_EN_MASK, ++ spm_p1_buck_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_DCDC5, "DCDC_REG5", "vcc_sys", ++ 255, SPM_P1_BUCK5_VSEL_REG, SPM_P1_BUCK_VSEL_MASK, ++ SPM_P1_BUCK5_CTRL_REG, SMP8821_BUCK_EN_MASK, ++ spm_p1_buck_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_DCDC6, "DCDC_REG6", "vcc_sys", ++ 255, SPM_P1_BUCK6_VSEL_REG, SPM_P1_BUCK_VSEL_MASK, ++ SPM_P1_BUCK6_CTRL_REG, SMP8821_BUCK_EN_MASK, ++ spm_p1_buck_ranges), ++ /* ALDO */ ++ SPM_P1_DESC(SPM_P1_ID_LDO1, "LDO_REG1", "vcc_sys", ++ 128, SPM_P1_ALDO1_VOLT_REG, SPM_P1_ALDO_VSEL_MASK, ++ SPM_P1_ALDO1_CTRL_REG, SPM_P1_ALDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO2, "LDO_REG2", "vcc_sys", ++ 128, SPM_P1_ALDO2_VOLT_REG, SPM_P1_ALDO_VSEL_MASK, ++ SPM_P1_ALDO2_CTRL_REG, SPM_P1_ALDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO3, "LDO_REG3", "vcc_sys", ++ 128, SPM_P1_ALDO3_VOLT_REG, SPM_P1_ALDO_VSEL_MASK, ++ SPM_P1_ALDO3_CTRL_REG, SPM_P1_ALDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO4, "LDO_REG4", "vcc_sys", ++ 128, SPM_P1_ALDO4_VOLT_REG, SPM_P1_ALDO_VSEL_MASK, ++ SPM_P1_ALDO4_CTRL_REG, SPM_P1_ALDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ /* DLDO */ ++ SPM_P1_DESC(SPM_P1_ID_LDO5, "LDO_REG5", "dcdc5", ++ 128, SPM_P1_DLDO1_VOLT_REG, SPM_P1_DLDO_VSEL_MASK, ++ SPM_P1_DLDO1_CTRL_REG, SPM_P1_DLDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO6, "LDO_REG6", "dcdc5", ++ 128, SPM_P1_DLDO2_VOLT_REG, SPM_P1_DLDO_VSEL_MASK, ++ SPM_P1_DLDO2_CTRL_REG, SPM_P1_DLDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO7, "LDO_REG7", "dcdc5", ++ 128, SPM_P1_DLDO3_VOLT_REG, SPM_P1_DLDO_VSEL_MASK, ++ SPM_P1_DLDO3_CTRL_REG, SPM_P1_DLDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO8, "LDO_REG8", "dcdc5", ++ 128, SPM_P1_DLDO4_VOLT_REG, SPM_P1_DLDO_VSEL_MASK, ++ SPM_P1_DLDO4_CTRL_REG, SPM_P1_DLDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO9, "LDO_REG9", "dcdc5", ++ 128, SPM_P1_DLDO5_VOLT_REG, SPM_P1_DLDO_VSEL_MASK, ++ SPM_P1_DLDO5_CTRL_REG, SPM_P1_DLDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO10, "LDO_REG10", "dcdc5", ++ 128, SPM_P1_DLDO6_VOLT_REG, SPM_P1_DLDO_VSEL_MASK, ++ SPM_P1_DLDO6_CTRL_REG, SPM_P1_DLDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ SPM_P1_DESC(SPM_P1_ID_LDO11, "LDO_REG11", "dcdc5", ++ 128, SPM_P1_DLDO7_VOLT_REG, SPM_P1_DLDO_VSEL_MASK, ++ SPM_P1_DLDO7_CTRL_REG, SPM_P1_DLDO_EN_MASK, spm_p1_ldo_ranges), ++ ++ /* PWR SWITCH */ ++ SPM_P1_DESC_SWITCH(SPM_P1_ID_SWITCH1, "SWITCH_REG1", "vcc_sys", ++ SPM_P1_SWITCH_CTRL_REG, SPM_P1_SWTICH_EN_MASK), ++}; ++ ++static struct regulator_match_data spm_p1_regulator_match_data = { ++ .nr_desc = ARRAY_SIZE(spm_p1_reg), ++ .desc = spm_p1_reg, ++ .name = "spm_p1", ++ .sleep_reg_offset = SPM_P1_SLEEP_REG_OFFSET, ++}; ++ ++static const struct of_device_id spm_p1_regulator_of_match[] = { ++ { .compatible = "spacemit,p1,regulator", .data = (void *)&spm_p1_regulator_match_data }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, spm_p1_regulator_of_match); ++ ++static int spm_p1_regulator_probe(struct platform_device *pdev) ++{ ++ struct regulator_config config = {}; ++ struct spacemit_pmic 
*pmic = dev_get_drvdata(pdev->dev.parent); ++ struct i2c_client *client; ++ const struct of_device_id *of_id; ++ struct regulator_dev *regulator_dev; ++ int i; ++ ++ of_id = of_match_device(spm_p1_regulator_of_match, &pdev->dev); ++ if (!of_id) { ++ pr_err("Unable to match OF ID\n"); ++ return -ENODEV; ++ } ++ ++ match_data = (struct regulator_match_data *)of_id->data; ++ ++ client = pmic->i2c; ++ config.dev = &client->dev; ++ config.regmap = pmic->regmap; ++ ++ for (i = 0; i < match_data->nr_desc; ++i) { ++ regulator_dev = devm_regulator_register(&pdev->dev, ++ match_data->desc + i, &config); ++ if (IS_ERR(regulator_dev)) { ++ pr_err("failed to register %d regulator\n", i); ++ return PTR_ERR(regulator_dev); ++ } ++ } ++ ++ return 0; ++} ++ ++static struct platform_driver spm_p1_regulator_driver = { ++ .probe = spm_p1_regulator_probe, ++ .driver = { ++ .name = "spm-p1-regulator", ++ .of_match_table = spm_p1_regulator_of_match, ++ }, ++}; ++ ++static int spm_p1_regulator_init(void) ++{ ++ return platform_driver_register(&spm_p1_regulator_driver); ++} ++subsys_initcall(spm_p1_regulator_init); ++ ++MODULE_DESCRIPTION("regulator drivers for the Spacemit series PMICs"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/th1520-aon-regulator.c b/drivers/regulator/th1520-aon-regulator.c new file mode 100644 index 000000000000..dea23e7d127e @@ -448910,7 +493768,7 @@ index 000000000000..dea23e7d127e +MODULE_DESCRIPTION("XuanTie TH1520 Aon regulator virtual driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig -index ccd59ddd7610..b72f8626e03b 100644 +index ccd59ddd7610..663427a27d87 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -253,6 +253,16 @@ config RESET_SUNXI @@ -448930,8 +493788,21 @@ index ccd59ddd7610..b72f8626e03b 100644 config RESET_TI_SCI tristate "TI System Control Interface (TI-SCI) reset driver" depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n) +@@ -318,6 +328,12 @@ config RESET_ZYNQ + help + This enables the reset controller driver for Xilinx Zynq SoCs. + ++config RESET_K1X_SPACEMIT ++ tristate "Reset controller driver for Spacemit K1X SoCs" ++ depends on SOC_SPACEMIT_K1X ++ help ++ Support for reset controllers on Spacemit K1X SoCs. 
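The new K1X reset controller is consumed through the generic reset framework rather than a custom API. Below is a minimal consumer sketch, assuming a hypothetical peripheral whose device-tree node carries a `resets` phandle into this controller; the probe function name and the use of the first, unnamed reset line are illustrative assumptions, not taken from the patch.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* Illustrative consumer probe: pulse the block's reset before touching it. */
static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	/* Resolves the node's "resets" phandle against the k1x controller. */
	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	return reset_control_deassert(rst);
}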
++ + source "drivers/reset/starfive/Kconfig" + source "drivers/reset/sti/Kconfig" + source "drivers/reset/hisilicon/Kconfig" diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile -index 8270da8a4baa..ca0abdce468d 100644 +index 8270da8a4baa..4d61ad1991f2 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -33,6 +33,8 @@ obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o @@ -448943,6 +493814,11 @@ index 8270da8a4baa..ca0abdce468d 100644 obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o +@@ -41,3 +43,4 @@ obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o + obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o + obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o + obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o ++obj-$(CONFIG_RESET_K1X_SPACEMIT) += reset-spacemit-k1x.o diff --git a/drivers/reset/reset-sophgo.c b/drivers/reset/reset-sophgo.c new file mode 100644 index 000000000000..3c46a43e24ba @@ -449112,6 +493988,681 @@ index 000000000000..3c46a43e24ba +MODULE_AUTHOR("Wei Huang"); +MODULE_DESCRIPTION("Bitmain SoC Reset Controoler Driver"); +MODULE_LICENSE("GPL"); +diff --git a/drivers/reset/reset-spacemit-k1x.c b/drivers/reset/reset-spacemit-k1x.c +new file mode 100644 +index 000000000000..d2ea9c9f196c +--- /dev/null ++++ b/drivers/reset/reset-spacemit-k1x.c +@@ -0,0 +1,669 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit k1x reset controller driver ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* APBC register offset */ ++#define APBC_UART1_CLK_RST 0x0 ++#define APBC_UART2_CLK_RST 0x4 ++#define APBC_GPIO_CLK_RST 0x8 ++#define APBC_PWM0_CLK_RST 0xc ++#define APBC_PWM1_CLK_RST 0x10 ++#define APBC_PWM2_CLK_RST 0x14 ++#define APBC_PWM3_CLK_RST 0x18 ++#define APBC_TWSI8_CLK_RST 0x20 ++#define APBC_UART3_CLK_RST 0x24 ++#define APBC_RTC_CLK_RST 0x28 ++#define APBC_TWSI0_CLK_RST 0x2c ++#define APBC_TWSI1_CLK_RST 0x30 ++#define APBC_TIMERS1_CLK_RST 0x34 ++#define APBC_TWSI2_CLK_RST 0x38 ++#define APBC_AIB_CLK_RST 0x3c ++#define APBC_TWSI4_CLK_RST 0x40 ++#define APBC_TIMERS2_CLK_RST 0x44 ++#define APBC_ONEWIRE_CLK_RST 0x48 ++#define APBC_TWSI5_CLK_RST 0x4c ++#define APBC_DRO_CLK_RST 0x58 ++#define APBC_IR_CLK_RST 0x5c ++#define APBC_TWSI6_CLK_RST 0x60 ++#define APBC_TWSI7_CLK_RST 0x68 ++#define APBC_TSEN_CLK_RST 0x6c ++#define APBC_UART4_CLK_RST 0x70 ++#define APBC_UART5_CLK_RST 0x74 ++#define APBC_UART6_CLK_RST 0x78 ++#define APBC_SSP3_CLK_RST 0x7c ++#define APBC_SSPA0_CLK_RST 0x80 ++#define APBC_SSPA1_CLK_RST 0x84 ++#define APBC_IPC_AP2AUD_CLK_RST 0x90 ++#define APBC_UART7_CLK_RST 0x94 ++#define APBC_UART8_CLK_RST 0x98 ++#define APBC_UART9_CLK_RST 0x9c ++#define APBC_CAN0_CLK_RST 0xa0 ++#define APBC_PWM4_CLK_RST 0xa8 ++#define APBC_PWM5_CLK_RST 0xac ++#define APBC_PWM6_CLK_RST 0xb0 ++#define APBC_PWM7_CLK_RST 0xb4 ++#define APBC_PWM8_CLK_RST 0xb8 ++#define APBC_PWM9_CLK_RST 0xbc ++#define APBC_PWM10_CLK_RST 0xc0 ++#define APBC_PWM11_CLK_RST 0xc4 ++#define APBC_PWM12_CLK_RST 0xc8 ++#define APBC_PWM13_CLK_RST 0xcc ++#define APBC_PWM14_CLK_RST 0xd0 ++#define APBC_PWM15_CLK_RST 0xd4 ++#define APBC_PWM16_CLK_RST 0xd8 ++#define APBC_PWM17_CLK_RST 0xdc ++#define APBC_PWM18_CLK_RST 0xe0 ++#define APBC_PWM19_CLK_RST 0xe4 ++ ++/* MPMU register offset */ ++#define MPMU_WDTPCR 0x200 ++ ++/* APMU register offset */ ++#define APMU_JPG_CLK_RES_CTRL 0x20 
++#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x24 ++#define APMU_ISP_CLK_RES_CTRL 0x38 ++#define APMU_LCD_CLK_RES_CTRL1 0x44 ++#define APMU_LCD_SPI_CLK_RES_CTRL 0x48 ++#define APMU_LCD_CLK_RES_CTRL2 0x4c ++#define APMU_CCIC_CLK_RES_CTRL 0x50 ++#define APMU_SDH0_CLK_RES_CTRL 0x54 ++#define APMU_SDH1_CLK_RES_CTRL 0x58 ++#define APMU_USB_CLK_RES_CTRL 0x5c ++#define APMU_QSPI_CLK_RES_CTRL 0x60 ++#define APMU_USB_CLK_RES_CTRL 0x5c ++#define APMU_DMA_CLK_RES_CTRL 0x64 ++#define APMU_AES_CLK_RES_CTRL 0x68 ++#define APMU_VPU_CLK_RES_CTRL 0xa4 ++#define APMU_GPU_CLK_RES_CTRL 0xcc ++#define APMU_SDH2_CLK_RES_CTRL 0xe0 ++#define APMU_PMUA_MC_CTRL 0xe8 ++#define APMU_PMU_CC2_AP 0x100 ++#define APMU_PMUA_EM_CLK_RES_CTRL 0x104 ++#define APMU_AUDIO_CLK_RES_CTRL 0x14c ++#define APMU_HDMI_CLK_RES_CTRL 0x1B8 ++#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc ++#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4 ++#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc ++#define APMU_EMAC0_CLK_RES_CTRL 0x3e4 ++#define APMU_EMAC1_CLK_RES_CTRL 0x3ec ++ ++/* APBC2 register offset */ ++#define APBC2_UART1_CLK_RST 0x00 ++#define APBC2_SSP2_CLK_RST 0x04 ++#define APBC2_TWSI3_CLK_RST 0x08 ++#define APBC2_RTC_CLK_RST 0x0c ++#define APBC2_TIMERS0_CLK_RST 0x10 ++#define APBC2_KPC_CLK_RST 0x14 ++#define APBC2_GPIO_CLK_RST 0x1c ++/* end of APBC2 register offset */ ++ ++/* RCPU register offset */ ++#define RCPU_HDMI_CLK_RST 0x2044 ++#define RCPU_CAN_CLK_RST 0x4c ++#define RCPU_I2C0_CLK_RST 0x30 ++#define RCPU_SSP0_CLK_RST 0x28 ++#define RCPU_IR_CLK_RST 0x48 ++#define RCPU_UART0_CLK_RST 0xd8 ++#define RCPU_UART1_CLK_RST 0x3c ++ ++/* RCPU2 register offset */ ++#define RCPU2_PWM0_CLK_RST 0x00 ++#define RCPU2_PWM1_CLK_RST 0x04 ++#define RCPU2_PWM2_CLK_RST 0x08 ++#define RCPU2_PWM3_CLK_RST 0x0c ++#define RCPU2_PWM4_CLK_RST 0x10 ++#define RCPU2_PWM5_CLK_RST 0x14 ++#define RCPU2_PWM6_CLK_RST 0x18 ++#define RCPU2_PWM7_CLK_RST 0x1c ++#define RCPU2_PWM8_CLK_RST 0x20 ++#define RCPU2_PWM9_CLK_RST 0x24 ++ ++enum spacemit_reset_base_type { ++ RST_BASE_TYPE_MPMU = 0, ++ RST_BASE_TYPE_APMU = 1, ++ RST_BASE_TYPE_APBC = 2, ++ RST_BASE_TYPE_APBS = 3, ++ RST_BASE_TYPE_CIU = 4, ++ RST_BASE_TYPE_DCIU = 5, ++ RST_BASE_TYPE_DDRC = 6, ++ RST_BASE_TYPE_AUDC = 7, ++ RST_BASE_TYPE_APBC2 = 8, ++ RST_BASE_TYPE_RCPU = 9, ++ RST_BASE_TYPE_RCPU2 = 10, ++}; ++ ++struct spacemit_reset_signal { ++ u32 offset; ++ u32 mask; ++ u32 deassert_val; ++ u32 assert_val; ++ enum spacemit_reset_base_type type; ++}; ++ ++struct spacemit_reset_variant { ++ const struct spacemit_reset_signal *signals; ++ u32 signals_num; ++ const struct reset_control_ops ops; ++}; ++ ++struct spacemit_reset { ++ spinlock_t *lock; ++ struct reset_controller_dev rcdev; ++ void __iomem *mpmu_base; ++ void __iomem *apmu_base; ++ void __iomem *apbc_base; ++ void __iomem *apbs_base; ++ void __iomem *ciu_base; ++ void __iomem *dciu_base; ++ void __iomem *ddrc_base; ++ void __iomem *audio_ctrl_base; ++ void __iomem *apbc2_base; ++ void __iomem *rcpu_base; ++ void __iomem *rcpu2_base; ++ const struct spacemit_reset_signal *signals; ++}; ++ ++/* for register access protection */ ++extern spinlock_t g_cru_lock; ++struct spacemit_reset k1x_reset_controller; ++ ++static const struct spacemit_reset_signal ++ k1x_reset_signals[RESET_NUMBER] = { ++ [RESET_UART1] = { APBC_UART1_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_UART2] = { APBC_UART2_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_GPIO] = { APBC_GPIO_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM0] = { APBC_PWM0_CLK_RST, BIT(2) | BIT(0), 
++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM1] = { APBC_PWM1_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM2] = { APBC_PWM2_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM3] = { APBC_PWM3_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM4] = { APBC_PWM4_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM5] = { APBC_PWM5_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM6] = { APBC_PWM6_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM7] = { APBC_PWM7_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM8] = { APBC_PWM8_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM9] = { APBC_PWM9_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM10] = { APBC_PWM10_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM11] = { APBC_PWM11_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM12] = { APBC_PWM12_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM13] = { APBC_PWM13_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM14] = { APBC_PWM14_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM15] = { APBC_PWM15_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM16] = { APBC_PWM16_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM17] = { APBC_PWM17_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM18] = { APBC_PWM18_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_PWM19] = { APBC_PWM19_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_SSP3] = { APBC_SSP3_CLK_RST, BIT(2), 0, BIT(2), ++ RST_BASE_TYPE_APBC }, ++ [RESET_UART3] = { APBC_UART3_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_RTC] = { APBC_RTC_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI0] = { APBC_TWSI0_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TIMERS1] = { APBC_TIMERS1_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_AIB] = { APBC_AIB_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TIMERS2] = { APBC_TIMERS2_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_ONEWIRE] = { APBC_ONEWIRE_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_SSPA0] = { APBC_SSPA0_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_SSPA1] = { APBC_SSPA1_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_DRO] = { APBC_DRO_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_IR] = { APBC_IR_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI1] = { APBC_TWSI1_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TSEN] = { APBC_TSEN_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI2] = { APBC_TWSI2_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI4] = { APBC_TWSI4_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI5] = { APBC_TWSI5_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI6] = { APBC_TWSI6_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI7] = { APBC_TWSI7_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_TWSI8] = { APBC_TWSI8_CLK_RST, 
BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_IPC_AP2AUD] = { APBC_IPC_AP2AUD_CLK_RST, ++ BIT(2), 0, BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_UART4] = { APBC_UART4_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_UART5] = { APBC_UART5_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_UART6] = { APBC_UART6_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_UART7] = { APBC_UART7_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_UART8] = { APBC_UART8_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_UART9] = { APBC_UART9_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_CAN0] = { APBC_CAN0_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC }, ++ [RESET_WDT] = { MPMU_WDTPCR, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_MPMU }, ++ [RESET_JPG] = { APMU_JPG_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_CSI] = { APMU_CSI_CCIC2_CLK_RES_CTRL, ++ BIT(1), BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_CCIC2_PHY] = { APMU_CSI_CCIC2_CLK_RES_CTRL, ++ BIT(2), BIT(2), 0, RST_BASE_TYPE_APMU }, ++ [RESET_CCIC3_PHY] = { APMU_CSI_CCIC2_CLK_RES_CTRL, ++ BIT(29), BIT(29), 0, RST_BASE_TYPE_APMU }, ++ [RESET_ISP] = { APMU_ISP_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_ISP_AHB] = { APMU_ISP_CLK_RES_CTRL, BIT(3), ++ BIT(3), 0, RST_BASE_TYPE_APMU }, ++ [RESET_ISP_CI] = { APMU_ISP_CLK_RES_CTRL, BIT(16), ++ BIT(16), 0, RST_BASE_TYPE_APMU }, ++ [RESET_ISP_CPP] = { APMU_ISP_CLK_RES_CTRL, BIT(27), ++ BIT(27), 0, RST_BASE_TYPE_APMU }, ++ [RESET_LCD] = { APMU_LCD_CLK_RES_CTRL1, BIT(4), ++ BIT(4), 0, RST_BASE_TYPE_APMU }, ++ [RESET_DSI_ESC] = { APMU_LCD_CLK_RES_CTRL1, BIT(3), ++ BIT(3), 0, RST_BASE_TYPE_APMU }, ++ [RESET_V2D] = { APMU_LCD_CLK_RES_CTRL1, BIT(27), ++ BIT(27), 0, RST_BASE_TYPE_APMU }, ++ [RESET_MIPI] = { APMU_LCD_CLK_RES_CTRL1, BIT(15), ++ BIT(15), 0, RST_BASE_TYPE_APMU }, ++ [RESET_LCD_SPI] = { APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(0), BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_LCD_SPI_BUS] = { APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(4), BIT(4), 0, RST_BASE_TYPE_APMU }, ++ [RESET_LCD_SPI_HBUS] = { APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(2), BIT(2), 0, RST_BASE_TYPE_APMU }, ++ [RESET_LCD_MCLK] = { APMU_LCD_CLK_RES_CTRL2, BIT(9), ++ BIT(9), 0, RST_BASE_TYPE_APMU }, ++ [RESET_CCIC_4X] = { APMU_CCIC_CLK_RES_CTRL, ++ BIT(1), BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_CCIC1_PHY] = { APMU_CCIC_CLK_RES_CTRL, ++ BIT(2), BIT(2), 0, RST_BASE_TYPE_APMU }, ++ [RESET_SDH_AXI] = { APMU_SDH0_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_SDH0] = { APMU_SDH0_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_SDH1] = { APMU_SDH1_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_USB_AXI] = { APMU_USB_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_USBP1_AXI] = { APMU_USB_CLK_RES_CTRL, BIT(4), ++ BIT(4), 0, RST_BASE_TYPE_APMU }, ++ [RESET_USB3_0] = { APMU_USB_CLK_RES_CTRL, ++ BIT(9) | BIT(10) | BIT(11), BIT(9) | BIT(10) | BIT(11), ++ 0, RST_BASE_TYPE_APMU }, ++ [RESET_QSPI] = { APMU_QSPI_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_QSPI_BUS] = { APMU_QSPI_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_DMA] = { APMU_DMA_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_AES] = { APMU_AES_CLK_RES_CTRL, BIT(4), ++ BIT(4), 0, RST_BASE_TYPE_APMU }, ++ [RESET_VPU] = { APMU_VPU_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_GPU] = { APMU_GPU_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, 
RST_BASE_TYPE_APMU }, ++ [RESET_SDH2] = { APMU_SDH2_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_MC] = { APMU_PMUA_MC_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_EM_AXI] = { APMU_PMUA_EM_CLK_RES_CTRL, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_APMU }, ++ [RESET_EM] = { APMU_PMUA_EM_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_AUDIO_SYS] = { APMU_AUDIO_CLK_RES_CTRL, ++ BIT(0) | BIT(2) | BIT(3), BIT(0) | BIT(2) | BIT(3), ++ 0, RST_BASE_TYPE_APMU }, ++ [RESET_HDMI] = { APMU_HDMI_CLK_RES_CTRL, BIT(9), ++ BIT(9), 0, RST_BASE_TYPE_APMU }, ++ [RESET_PCIE0] = { APMU_PCIE_CLK_RES_CTRL_0, ++ BIT(3) | BIT(4) | BIT(5) | BIT(8), ++ BIT(3) | BIT(4) | BIT(5), BIT(8), ++ RST_BASE_TYPE_APMU }, ++ [RESET_PCIE1] = { APMU_PCIE_CLK_RES_CTRL_1, ++ BIT(3) | BIT(4) | BIT(5) | BIT(8), ++ BIT(3) | BIT(4) | BIT(5), BIT(8), ++ RST_BASE_TYPE_APMU }, ++ [RESET_PCIE2] = { APMU_PCIE_CLK_RES_CTRL_2, ++ 0x138, 0x38, 0x100, RST_BASE_TYPE_APMU }, ++ [RESET_EMAC0] = { APMU_EMAC0_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_EMAC1] = { APMU_EMAC1_CLK_RES_CTRL, BIT(1), ++ BIT(1), 0, RST_BASE_TYPE_APMU }, ++ [RESET_SEC_UART1] = { APBC2_UART1_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC2 }, ++ [RESET_SEC_SSP2] = { APBC2_SSP2_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC2 }, ++ [RESET_SEC_TWSI3] = { APBC2_TWSI3_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC2 }, ++ [RESET_SEC_RTC] = { APBC2_RTC_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC2 }, ++ [RESET_SEC_TIMERS0] = { APBC2_TIMERS0_CLK_RST, ++ BIT(2), 0, BIT(2), RST_BASE_TYPE_APBC2 }, ++ [RESET_SEC_KPC] = { APBC2_KPC_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC2 }, ++ [RESET_SEC_GPIO] = { APBC2_GPIO_CLK_RST, BIT(2), 0, ++ BIT(2), RST_BASE_TYPE_APBC2 }, ++ [RESET_RCPU_HDMIAUDIO] = { RCPU_HDMI_CLK_RST, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_RCPU }, ++ [RESET_RCPU_CAN] = { RCPU_CAN_CLK_RST, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_RCPU }, ++ ++ [RESET_RCPU_I2C0] = { RCPU_I2C0_CLK_RST, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_RCPU }, ++ [RESET_RCPU_SSP0] = { RCPU_SSP0_CLK_RST, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_RCPU }, ++ [RESET_RCPU_IR] = { RCPU_IR_CLK_RST, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_RCPU }, ++ [RESET_RCPU_UART0] = { RCPU_UART0_CLK_RST, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_RCPU }, ++ [RESET_RCPU_UART1] = { RCPU_UART1_CLK_RST, BIT(0), ++ BIT(0), 0, RST_BASE_TYPE_RCPU }, ++ ++ [RESET_RCPU2_PWM0] = { RCPU2_PWM0_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM1] = { RCPU2_PWM1_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM2] = { RCPU2_PWM2_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM3] = { RCPU2_PWM3_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM4] = { RCPU2_PWM4_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM5] = { RCPU2_PWM5_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM6] = { RCPU2_PWM6_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM7] = { RCPU2_PWM7_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM8] = { RCPU2_PWM8_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++ [RESET_RCPU2_PWM9] = { RCPU2_PWM9_CLK_RST, BIT(2) | BIT(0), ++ BIT(0), BIT(2), RST_BASE_TYPE_RCPU2 }, ++}; ++ ++static struct spacemit_reset *to_spacemit_reset(struct reset_controller_dev 
*rcdev) ++{ ++ return container_of(rcdev, struct spacemit_reset, rcdev); ++} ++ ++static u32 spacemit_reset_read(struct spacemit_reset *reset, u32 id) ++{ ++ void __iomem *base; ++ ++ switch (reset->signals[id].type) { ++ case RST_BASE_TYPE_MPMU: ++ base = reset->mpmu_base; ++ break; ++ case RST_BASE_TYPE_APMU: ++ base = reset->apmu_base; ++ break; ++ case RST_BASE_TYPE_APBC: ++ base = reset->apbc_base; ++ break; ++ case RST_BASE_TYPE_APBS: ++ base = reset->apbs_base; ++ break; ++ case RST_BASE_TYPE_CIU: ++ base = reset->ciu_base; ++ break; ++ case RST_BASE_TYPE_DCIU: ++ base = reset->dciu_base; ++ break; ++ case RST_BASE_TYPE_DDRC: ++ base = reset->ddrc_base; ++ break; ++ case RST_BASE_TYPE_AUDC: ++ base = reset->audio_ctrl_base; ++ break; ++ case RST_BASE_TYPE_APBC2: ++ base = reset->apbc2_base; ++ break; ++ case RST_BASE_TYPE_RCPU: ++ base = reset->rcpu_base; ++ break; ++ case RST_BASE_TYPE_RCPU2: ++ base = reset->rcpu2_base; ++ break; ++ default: ++ base = reset->apbc_base; ++ break; ++ } ++ ++ return readl(base + reset->signals[id].offset); ++} ++ ++static void spacemit_reset_write(struct spacemit_reset *reset, u32 value, u32 id) ++{ ++ void __iomem *base; ++ ++ switch (reset->signals[id].type) { ++ case RST_BASE_TYPE_MPMU: ++ base = reset->mpmu_base; ++ break; ++ case RST_BASE_TYPE_APMU: ++ base = reset->apmu_base; ++ break; ++ case RST_BASE_TYPE_APBC: ++ base = reset->apbc_base; ++ break; ++ case RST_BASE_TYPE_APBS: ++ base = reset->apbs_base; ++ break; ++ case RST_BASE_TYPE_CIU: ++ base = reset->ciu_base; ++ break; ++ case RST_BASE_TYPE_DCIU: ++ base = reset->dciu_base; ++ break; ++ case RST_BASE_TYPE_DDRC: ++ base = reset->ddrc_base; ++ break; ++ case RST_BASE_TYPE_AUDC: ++ base = reset->audio_ctrl_base; ++ break; ++ case RST_BASE_TYPE_APBC2: ++ base = reset->apbc2_base; ++ break; ++ case RST_BASE_TYPE_RCPU: ++ base = reset->rcpu_base; ++ break; ++ case RST_BASE_TYPE_RCPU2: ++ base = reset->rcpu2_base; ++ break; ++ default: ++ base = reset->apbc_base; ++ break; ++ } ++ ++ writel(value, base + reset->signals[id].offset); ++} ++ ++static void spacemit_reset_set(struct reset_controller_dev *rcdev, u32 id, bool assert) ++{ ++ u32 value; ++ struct spacemit_reset *reset = to_spacemit_reset(rcdev); ++ ++ value = spacemit_reset_read(reset, id); ++ if (assert) { ++ value &= ~reset->signals[id].mask; ++ value |= reset->signals[id].assert_val; ++ } else { ++ value &= ~reset->signals[id].mask; ++ value |= reset->signals[id].deassert_val; ++ } ++ spacemit_reset_write(reset, value, id); ++} ++ ++static int spacemit_reset_update(struct reset_controller_dev *rcdev, ++ unsigned long id, bool assert) ++{ ++ unsigned long flags; ++ struct spacemit_reset *reset = to_spacemit_reset(rcdev); ++ ++ if (id < RESET_UART1 || id >= RESET_NUMBER) ++ return 0; ++ ++ if (id == RESET_TWSI8) ++ return 0; ++ ++ spin_lock_irqsave(reset->lock, flags); ++ if (assert) ++ spacemit_reset_set(rcdev, id, assert); ++ else ++ spacemit_reset_set(rcdev, id, assert); ++ spin_unlock_irqrestore(reset->lock, flags); ++ ++ return 0; ++} ++ ++static int spacemit_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) ++{ ++ return spacemit_reset_update(rcdev, id, true); ++} ++ ++static int spacemit_reset_deassert(struct reset_controller_dev *rcdev, ++ unsigned long id) ++{ ++ return spacemit_reset_update(rcdev, id, false); ++} ++ ++static const struct spacemit_reset_variant k1x_reset_data = { ++ .signals = k1x_reset_signals, ++ .signals_num = ARRAY_SIZE(k1x_reset_signals), ++ .ops = { ++ .assert = spacemit_reset_assert, 
++ .deassert = spacemit_reset_deassert, ++ }, ++}; ++ ++static void spacemit_reset_init(struct device_node *np) ++{ ++ struct spacemit_reset *reset; ++ ++ if (of_device_is_compatible(np, "spacemit,k1x-reset")) { ++ reset = &k1x_reset_controller; ++ reset->mpmu_base = of_iomap(np, 0); ++ if (!reset->mpmu_base) { ++ pr_err("failed to map mpmu registers\n"); ++ goto out; ++ } ++ ++ reset->apmu_base = of_iomap(np, 1); ++ if (!reset->apmu_base) { ++ pr_err("failed to map apmu registers\n"); ++ goto out; ++ } ++ ++ reset->apbc_base = of_iomap(np, 2); ++ if (!reset->apbc_base) { ++ pr_err("failed to map apbc registers\n"); ++ goto out; ++ } ++ ++ reset->apbs_base = of_iomap(np, 3); ++ if (!reset->apbs_base) { ++ pr_err("failed to map apbs registers\n"); ++ goto out; ++ } ++ ++ reset->ciu_base = of_iomap(np, 4); ++ if (!reset->ciu_base) { ++ pr_err("failed to map ciu registers\n"); ++ goto out; ++ } ++ ++ reset->dciu_base = of_iomap(np, 5); ++ if (!reset->dciu_base) { ++ pr_err("failed to map dragon ciu registers\n"); ++ goto out; ++ } ++ ++ reset->ddrc_base = of_iomap(np, 6); ++ if (!reset->ddrc_base) { ++ pr_err("failed to map ddrc registers\n"); ++ goto out; ++ } ++ ++ reset->apbc2_base = of_iomap(np, 7); ++ if (!reset->apbc2_base) { ++ pr_err("failed to map apbc2 registers\n"); ++ goto out; ++ } ++ ++ reset->rcpu_base = of_iomap(np, 8); ++ if (!reset->rcpu_base) { ++ pr_err("failed to map rcpu registers\n"); ++ goto out; ++ } ++ ++ reset->rcpu2_base = of_iomap(np, 9); ++ if (!reset->rcpu2_base) { ++ pr_err("failed to map rcpu2 registers\n"); ++ goto out; ++ } ++ } else { ++ pr_err("not spacemit,k1x-reset\n"); ++ goto out; ++ } ++ ++ reset->lock = &g_cru_lock; ++ reset->signals = k1x_reset_data.signals; ++ reset->rcdev.owner = THIS_MODULE; ++ reset->rcdev.nr_resets = k1x_reset_data.signals_num; ++ reset->rcdev.ops = &k1x_reset_data.ops; ++ reset->rcdev.of_node = np; ++ reset_controller_register(&reset->rcdev); ++out: ++ return; ++} ++ ++CLK_OF_DECLARE(k1x_reset, "spacemit,k1x-reset", spacemit_reset_init); ++ diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c new file mode 100644 index 000000000000..06f82be725be @@ -450277,10 +495828,27 @@ index 000000000000..b985c42d08c1 +MODULE_DESCRIPTION("XuanTie TH1520 mailbox IPC client driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig -index 6f270577df86..2c96112594ed 100644 +index 6f270577df86..f5c257c73b5e 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig -@@ -1861,7 +1861,7 @@ config RTC_DRV_MT7622 +@@ -718,6 +718,16 @@ config RTC_DRV_SD3078 + This driver can also be built as a module. If so, the module + will be called rtc-sd3078 + ++config RTC_DRV_SPACEMIT_P1 ++ tristate "Spacemit P1 rtc module" ++ depends on MFD_SPACEMIT_P1 ++ help ++ If you say yes here you get support for RTC function in Spacemit ++ P1 chips. ++ ++ This driver can also be built as a module. If so, the module ++ will be called rtc-spacemit-p1. ++ + endif # I2C + + comment "SPI RTC drivers" +@@ -1861,7 +1871,7 @@ config RTC_DRV_MT7622 config RTC_DRV_XGENE tristate "APM X-Gene RTC" depends on HAS_IOMEM @@ -450289,7 +495857,7 @@ index 6f270577df86..2c96112594ed 100644 help If you say yes here you get support for the APM X-Gene SoC real time clock. -@@ -1869,6 +1869,16 @@ config RTC_DRV_XGENE +@@ -1869,6 +1879,16 @@ config RTC_DRV_XGENE This driver can also be built as a module, if so, the module will be called "rtc-xgene". 
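The Spacemit P1 RTC driver added further below converts between the PMIC's date counters and struct rtc_time using fixed offsets: judging by the +1 and +100 adjustments in spt_rtc_read_time(), the hardware day counter appears to be zero-based and the year counter appears to count from 2000, while struct rtc_time counts days from 1 and years from 1900. A small sketch of that mapping, under those inferred assumptions (p1_date and p1_to_tm() are hypothetical names):

#include <stdint.h>

/* Hardware date fields as read from the P1 counter registers (assumed layout). */
struct p1_date { uint8_t sec, min, hour, day, month, year; };

/* Map the assumed hardware encoding onto struct rtc_time conventions. */
static void p1_to_tm(const struct p1_date *hw, int *tm_mday, int *tm_mon, int *tm_year)
{
	*tm_mday = hw->day + 1;    /* hardware day 0..30 -> tm_mday 1..31   */
	*tm_mon  = hw->month;      /* both sides use months 0..11           */
	*tm_year = hw->year + 100; /* years since 2000 -> years since 1900  */
}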
@@ -450306,7 +495874,7 @@ index 6f270577df86..2c96112594ed 100644 config RTC_DRV_PIC32 tristate "Microchip PIC32 RTC" depends on MACH_PIC32 -@@ -1992,4 +2002,10 @@ config RTC_DRV_POLARFIRE_SOC +@@ -1992,4 +2012,10 @@ config RTC_DRV_POLARFIRE_SOC This driver can also be built as a module, if so, the module will be called "rtc-mpfs". @@ -450318,14 +495886,15 @@ index 6f270577df86..2c96112594ed 100644 + endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile -index 7711f79787ac..441b27e2d8fd 100644 +index 7711f79787ac..94b84f3b2864 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile -@@ -189,3 +189,4 @@ obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o +@@ -189,3 +189,5 @@ obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o obj-$(CONFIG_RTC_DRV_XGENE) += rtc-xgene.o obj-$(CONFIG_RTC_DRV_ZYNQMP) += rtc-zynqmp.o +obj-$(CONFIG_RTC_DRV_ASTBMC) += rtc-astbmc.o ++obj-$(CONFIG_RTC_DRV_SPACEMIT_P1) += rtc-spacemit-p1.o diff --git a/drivers/rtc/rtc-astbmc.c b/drivers/rtc/rtc-astbmc.c new file mode 100644 index 000000000000..8b56090cf0f8 @@ -450867,6 +496436,728 @@ index 000000000000..8b56090cf0f8 +MODULE_AUTHOR("Ryan Chen "); +MODULE_DESCRIPTION("ASPEED Host BMC DEVICE Driver"); +MODULE_LICENSE("GPL"); +diff --git a/drivers/rtc/rtc-spacemit-p1.c b/drivers/rtc/rtc-spacemit-p1.c +new file mode 100644 +index 000000000000..da52ded698ba +--- /dev/null ++++ b/drivers/rtc/rtc-spacemit-p1.c +@@ -0,0 +1,716 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * rtc driver for Spacemit P1 ++ * ++ * Copyright (c) 2023, Spacemit Co., Ltd ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* rtc */ ++union rtc_ctl_desc { ++ unsigned int val; ++ struct { ++ unsigned int crystal_en:1; ++ unsigned int out_32k_en:1; ++ unsigned int rtc_en:1; ++ unsigned int rtc_clk_sel:1; ++ unsigned int tick_type:1; ++ unsigned int alarm_en:1; ++ unsigned int tick_en:1; ++ unsigned int reserved:25; ++ } bits; ++}; ++ ++struct rtc_regdesc { ++ /* seconds */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } cnt_s; ++ ++ /* mini */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } cnt_mi; ++ ++ /* hour */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } cnt_h; ++ ++ /* day */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } cnt_d; ++ ++ /* mounth */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } cnt_mo; ++ ++ /* year */ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } cnt_y; ++ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } alarm_s; ++ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } alarm_mi; ++ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } alarm_h; ++ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } alarm_d; ++ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } alarm_mo; ++ ++ struct { ++ unsigned char reg; ++ unsigned char msk; ++ } alarm_y; ++ ++ struct { ++ unsigned char reg; ++ } rtc_ctl; ++}; ++ ++static const struct rtc_regdesc spm_p1_regdesc = { ++ .cnt_s = { ++ .reg = 0xd, ++ .msk = 0x3f, ++ }, ++ ++ .cnt_mi = { ++ .reg = 0xe, ++ .msk = 0x3f, ++ }, ++ ++ .cnt_h = { ++ .reg = 0xf, ++ .msk = 0x1f, ++ }, ++ ++ .cnt_d = { ++ .reg = 0x10, ++ .msk = 0x1f, ++ }, ++ ++ .cnt_mo = { ++ .reg = 0x11, ++ .msk = 0xf, ++ }, ++ ++ .cnt_y = { ++ .reg = 0x12, ++ .msk = 0x3f, ++ }, ++ ++ .alarm_s = { ++ .reg = 0x13, ++ .msk = 0x3f, ++ }, ++ ++ .alarm_mi = { ++ .reg = 
0x14, ++ .msk = 0x3f, ++ }, ++ ++ .alarm_h = { ++ .reg = 0x15, ++ .msk = 0x1f, ++ }, ++ ++ .alarm_d = { ++ .reg = 0x16, ++ .msk = 0x1f, ++ }, ++ ++ .alarm_mo = { ++ .reg = 0x17, ++ .msk = 0xf, ++ }, ++ ++ .alarm_y = { ++ .reg = 0x18, ++ .msk = 0x3f, ++ }, ++ ++ .rtc_ctl = { ++ .reg = 0x1d, ++ }, ++}; ++ ++struct spm_p1_rtc { ++ int irq; ++ struct device *dev; ++ struct regmap *regmap; ++ struct rtc_device *rtc_dev; ++ struct rtc_regdesc *desc; ++}; ++ ++static irqreturn_t spt_rtc_irq(int irq, void *_pwr) ++{ ++ struct spm_p1_rtc *rtc = (struct spm_p1_rtc *)_pwr; ++ ++ rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF); ++ ++ return IRQ_HANDLED; ++} ++ ++static int spt_rtc_read_time(struct device *dev, struct rtc_time *tm) ++{ ++ int ret; ++ unsigned int v[6], pre_v[6] = {0}; ++ struct spm_p1_rtc *rtc = dev_get_drvdata(dev); ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_s.reg, &pre_v[0]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read second: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_mi.reg, &pre_v[1]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read minute: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_h.reg, &pre_v[2]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read hour: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_d.reg, &pre_v[3]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read day: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_mo.reg, &pre_v[4]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read month: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_y.reg, &pre_v[5]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read year: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ while (1) { ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_s.reg, &v[0]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read second: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_mi.reg, &v[1]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read minute: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_h.reg, &v[2]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read hour: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_d.reg, &v[3]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read day: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_mo.reg, &v[4]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read month: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_y.reg, &v[5]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read year: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ if ((pre_v[0] == v[0]) && (pre_v[1] == v[1]) && ++ (pre_v[2] == v[2]) && (pre_v[3] == v[3]) && ++ (pre_v[4] == v[4]) && (pre_v[5] == v[5])) ++ break; ++ ++ pre_v[0] = v[0]; ++ pre_v[1] = v[1]; ++ pre_v[2] = v[2]; ++ pre_v[3] = v[3]; ++ pre_v[4] = v[4]; ++ pre_v[5] = v[5]; ++ } ++ ++ tm->tm_sec = v[0] & rtc->desc->cnt_s.msk; ++ tm->tm_min = v[1] & rtc->desc->cnt_mi.msk; ++ tm->tm_hour = v[2] & rtc->desc->cnt_h.msk; ++ tm->tm_mday = (v[3] & rtc->desc->cnt_d.msk) + 1; ++ tm->tm_mon = (v[4] & rtc->desc->cnt_mo.msk); ++ tm->tm_year = (v[5] & rtc->desc->cnt_y.msk) + 100; ++ ++ pr_debug("%s:%d, s:%d, min:%d, hour:%d, mday:%d, month:%d, year:%d\n", ++ __func__, __LINE__, ++ tm->tm_sec, ++ tm->tm_min, ++ tm->tm_hour, ++ 
tm->tm_mday, ++ tm->tm_mon, ++ tm->tm_year); ++ ++ return 0; ++} ++ ++static int spt_rtc_set_time(struct device *dev, struct rtc_time *tm) ++{ ++ int ret; ++ unsigned int v[6]; ++ union rtc_ctl_desc rtc_ctl; ++ struct spm_p1_rtc *rtc = dev_get_drvdata(dev); ++ ++ pr_debug("%s:%d, s:%d, min:%d, hour:%d, mday:%d, month:%d, year:%d\n", __func__, __LINE__, ++ tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year); ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->rtc_ctl.reg, (unsigned int *)&rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read rtc ctrl: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ /* disable rtc first */ ++ rtc_ctl.bits.rtc_en = 0; ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->rtc_ctl.reg, ++ 0xff, rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to set rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ while (1) { ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->cnt_s.reg, ++ rtc->desc->cnt_s.msk, tm->tm_sec); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update second: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->cnt_mi.reg, ++ rtc->desc->cnt_mi.msk, tm->tm_min); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update minutes: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->cnt_h.reg, ++ rtc->desc->cnt_h.msk, tm->tm_hour); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update hour: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->cnt_d.reg, ++ rtc->desc->cnt_d.msk, tm->tm_mday - 1); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update day: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->cnt_mo.reg, ++ rtc->desc->cnt_mo.msk, tm->tm_mon); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update month: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->cnt_y.reg, ++ rtc->desc->cnt_y.msk, tm->tm_year - 100); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update month: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ /* read again */ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_s.reg, &v[0]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read second: %d\n", ret); ++ return -EINVAL; ++ } ++ v[0] = v[0] & rtc->desc->cnt_s.msk; ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_mi.reg, &v[1]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read minute: %d\n", ret); ++ return -EINVAL; ++ } ++ v[1] = v[1] & rtc->desc->cnt_mi.msk; ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_h.reg, &v[2]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read hour: %d\n", ret); ++ return -EINVAL; ++ } ++ v[2] = v[2] & rtc->desc->cnt_h.msk; ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_d.reg, &v[3]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read day: %d\n", ret); ++ return -EINVAL; ++ } ++ v[3] = v[3] & rtc->desc->cnt_d.msk; ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_mo.reg, &v[4]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read month: %d\n", ret); ++ return -EINVAL; ++ } ++ v[4] = v[4] & rtc->desc->cnt_mo.msk; ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->cnt_y.reg, &v[5]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read year: %d\n", ret); ++ return -EINVAL; ++ } ++ v[5] = v[5] & rtc->desc->cnt_y.msk; ++ ++ if ((v[0] == (rtc->desc->cnt_s.msk & tm->tm_sec)) && ++ (v[1] == (rtc->desc->cnt_mi.msk & tm->tm_min)) && ++ (v[2] == (rtc->desc->cnt_h.msk & tm->tm_hour)) && ++ ((v[3] + 1) == (rtc->desc->cnt_d.msk & 
tm->tm_mday)) && ++ (v[4] == (rtc->desc->cnt_mo.msk & tm->tm_mon)) && ++ (v[5] == (rtc->desc->cnt_y.msk & (tm->tm_year - 100)))) ++ break; ++ } ++ ++ /* enable rtc last */ ++ rtc_ctl.bits.rtc_en = 1; ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->rtc_ctl.reg, ++ 0xff, rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to set rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int spt_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) ++{ ++ int ret; ++ unsigned int v[6]; ++ union rtc_ctl_desc rtc_ctl; ++ struct spm_p1_rtc *rtc = dev_get_drvdata(dev); ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->alarm_s.reg, &v[0]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read alarm second: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->alarm_mi.reg, &v[1]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read alarm minute: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->alarm_h.reg, &v[2]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read alarm hour: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->alarm_d.reg, &v[3]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read alarm day: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->alarm_mo.reg, &v[4]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read alarm month: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->alarm_y.reg, &v[5]); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read alarm year: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ /* 2000:1:1:0:0:0 */ ++ alrm->time.tm_sec = v[0] & rtc->desc->alarm_s.msk; ++ alrm->time.tm_min = v[1] & rtc->desc->alarm_mi.msk; ++ alrm->time.tm_hour = v[2] & rtc->desc->alarm_h.msk; ++ alrm->time.tm_mday = (v[3] & rtc->desc->alarm_d.msk) + 1; ++ alrm->time.tm_mon = (v[4] & rtc->desc->alarm_mo.msk); ++ alrm->time.tm_year = (v[5] & rtc->desc->alarm_y.msk) + 100; ++ ++ pr_debug("%s:%d, s:%d, min:%d, hour:%d, mday:%d, month:%d, year:%d\n", __func__, __LINE__, ++ alrm->time.tm_sec, alrm->time.tm_min, alrm->time.tm_hour, ++ alrm->time.tm_mday, alrm->time.tm_mon, alrm->time.tm_year); ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->rtc_ctl.reg, (unsigned int *)&rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read alarm second: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ alrm->enabled = rtc_ctl.bits.alarm_en; ++ ++ return 0; ++} ++ ++static int spt_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) ++{ ++ int ret; ++ union rtc_ctl_desc rtc_ctl; ++ struct spm_p1_rtc *rtc = dev_get_drvdata(dev); ++ ++ pr_debug("%s:%d, s:%d, min:%d, hour:%d, mday:%d, month:%d, year:%d\n", __func__, __LINE__, ++ alrm->time.tm_sec, alrm->time.tm_min, alrm->time.tm_hour, ++ alrm->time.tm_mday, alrm->time.tm_mon, alrm->time.tm_year); ++ ++ /* disable the alrm function first */ ++ ret = regmap_read(rtc->regmap, rtc->desc->rtc_ctl.reg, (unsigned int *)&rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ rtc_ctl.bits.alarm_en = 0; ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->rtc_ctl.reg, ++ 0xff, rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to set rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->alarm_s.reg, ++ rtc->desc->alarm_s.msk, alrm->time.tm_sec); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update alrm second: %d\n", ret); ++ 
return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->alarm_mi.reg, ++ rtc->desc->alarm_mi.msk, alrm->time.tm_min); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update alarm minutes: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->alarm_h.reg, ++ rtc->desc->alarm_h.msk, alrm->time.tm_hour); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update alarm hour: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->alarm_d.reg, ++ rtc->desc->alarm_d.msk, alrm->time.tm_mday - 1); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update alarm day: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->alarm_mo.reg, ++ rtc->desc->alarm_mo.msk, alrm->time.tm_mon); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update alarm month: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->alarm_y.reg, ++ rtc->desc->alarm_y.msk, alrm->time.tm_year - 100); ++ if (ret) { ++ dev_err(rtc->dev, "failed to update month: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ if (alrm->enabled) { ++ rtc_ctl.bits.alarm_en = 1; ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->rtc_ctl.reg, ++ 0xff, rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to set rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++static int spt_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) ++{ ++ int ret; ++ union rtc_ctl_desc rtc_ctl; ++ struct spm_p1_rtc *rtc = dev_get_drvdata(dev); ++ ++ ret = regmap_read(rtc->regmap, rtc->desc->rtc_ctl.reg, (unsigned int *)&rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ if (enabled) ++ rtc_ctl.bits.alarm_en = 1; ++ else ++ rtc_ctl.bits.alarm_en = 0; ++ ++ ret = regmap_write_bits(rtc->regmap, rtc->desc->rtc_ctl.reg, ++ 0xff, rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to set rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static const struct rtc_class_ops spt_rtc_ops = { ++ .read_time = spt_rtc_read_time, ++ .set_time = spt_rtc_set_time, ++ .read_alarm = spt_rtc_read_alarm, ++ .set_alarm = spt_rtc_set_alarm, ++ .alarm_irq_enable = spt_rtc_alarm_irq_enable, ++}; ++ ++static const struct of_device_id spm_p1_rtc_id_table[] = { ++ { .compatible = "spacemit,p1,rtc", .data = &spm_p1_regdesc }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, spm_p1_rtc_id_table); ++ ++static int spm_p1_rtc_probe(struct platform_device *pdev) ++{ ++ int ret = 0; ++ struct spm_p1_rtc *rtc; ++ union rtc_ctl_desc rtc_ctl; ++ const struct of_device_id *of_id; ++ struct spacemit_pmic *pmic = dev_get_drvdata(pdev->dev.parent); ++ ++ rtc = devm_kzalloc(&pdev->dev, sizeof(struct spm_p1_rtc), GFP_KERNEL); ++ if (!rtc) ++ return -ENOMEM; ++ ++ of_id = of_match_device(spm_p1_rtc_id_table, &pdev->dev); ++ if (!of_id) { ++ pr_err("Unable to match OF ID\n"); ++ return -ENODEV; ++ } ++ ++ rtc->regmap = pmic->regmap; ++ rtc->dev = &pdev->dev; ++ rtc->desc = (struct rtc_regdesc *)of_id->data; ++ rtc->irq = platform_get_irq(pdev, 0); ++ if (rtc->irq < 0) { ++ dev_err(&pdev->dev, "get rtc irq error: %d\n", rtc->irq); ++ return -EINVAL; ++ } ++ ++ dev_set_drvdata(&pdev->dev, rtc); ++ ++ ret = devm_request_any_context_irq(&pdev->dev, rtc->irq, ++ spt_rtc_irq, ++ IRQF_TRIGGER_NONE | IRQF_ONESHOT, ++ "rtc@pmic", rtc); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Can't register rtc irq: %d\n", ret); ++ return -EINVAL; ++ } ++ 
++ dev_pm_set_wake_irq(&pdev->dev, rtc->irq); ++ device_init_wakeup(&pdev->dev, 1); ++ ++ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev); ++ if (IS_ERR(rtc->rtc_dev)) ++ return PTR_ERR(rtc->rtc_dev); ++ ++ rtc->rtc_dev->ops = &spt_rtc_ops; ++ rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000; ++ rtc->rtc_dev->range_max = RTC_TIMESTAMP_END_2063; ++ ++ ret = devm_rtc_register_device(rtc->rtc_dev); ++ if (ret) { ++ dev_err(&pdev->dev, "register rtc device error: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ /* enable the rtc function */ ++ ret = regmap_read(rtc->regmap, rtc->desc->rtc_ctl.reg, (unsigned int *)&rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to read rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ /* internal 32k clk */ ++ rtc_ctl.bits.rtc_clk_sel = 1; ++ /* enable rtc */ ++ rtc_ctl.bits.rtc_en = 1; ++ /* rtc clk out enable */ ++ rtc_ctl.bits.out_32k_en = 1; ++ /* enable external crystal */ ++ rtc_ctl.bits.crystal_en = 1; ++ ++ ret = regmap_update_bits(rtc->regmap, rtc->desc->rtc_ctl.reg, ++ 0xff, rtc_ctl.val); ++ if (ret) { ++ dev_err(rtc->dev, "failed to set rtc ctrl register: %d\n", ret); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static struct platform_driver spm_p1_rtc_driver = { ++ .probe = spm_p1_rtc_probe, ++ .driver = { ++ .name = "spm-p1-rtc", ++ .of_match_table = spm_p1_rtc_id_table, ++ }, ++}; ++ ++module_platform_driver(spm_p1_rtc_driver); ++ ++MODULE_DESCRIPTION("Spacemit P1 rtc driver"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c index f78efc9760c0..e83bf21bc58e 100644 --- a/drivers/rtc/rtc-xgene.c @@ -450937,10 +497228,18 @@ index f78efc9760c0..e83bf21bc58e 100644 }; MODULE_DEVICE_TABLE(of, xgene_rtc_of_match); diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig -index d21e75d69294..125cacde45d2 100644 +index d21e75d69294..314474cdc8b9 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig -@@ -31,5 +31,6 @@ source "drivers/soc/ti/Kconfig" +@@ -24,6 +24,7 @@ source "drivers/soc/renesas/Kconfig" + source "drivers/soc/rockchip/Kconfig" + source "drivers/soc/samsung/Kconfig" + source "drivers/soc/sifive/Kconfig" ++source "drivers/soc/spacemit/Kconfig" + source "drivers/soc/starfive/Kconfig" + source "drivers/soc/sunxi/Kconfig" + source "drivers/soc/tegra/Kconfig" +@@ -31,5 +32,6 @@ source "drivers/soc/ti/Kconfig" source "drivers/soc/ux500/Kconfig" source "drivers/soc/versatile/Kconfig" source "drivers/soc/xilinx/Kconfig" @@ -450948,10 +497247,10 @@ index d21e75d69294..125cacde45d2 100644 endmenu diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile -index 0706a27d13be..37b245c36da7 100644 +index 0706a27d13be..e58b1adf9e0b 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile -@@ -29,9 +29,11 @@ obj-y += renesas/ +@@ -29,9 +29,12 @@ obj-y += renesas/ obj-y += rockchip/ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ obj-y += sifive/ @@ -450963,6 +497262,7 @@ index 0706a27d13be..37b245c36da7 100644 obj-$(CONFIG_PLAT_VERSATILE) += versatile/ obj-y += xilinx/ +obj-y += xuantie/ ++obj-$(CONFIG_SOC_SPACEMIT) += spacemit/ diff --git a/drivers/soc/sophgo/Makefile b/drivers/soc/sophgo/Makefile new file mode 100644 index 000000000000..1e143d85aa17 @@ -452876,6 +499176,77 @@ index 000000000000..bf419f1821ef +MODULE_DESCRIPTION("MCU I2C driver for bm16xx soc platform"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Chao.Wei@bitmain.com>"); +diff --git a/drivers/soc/spacemit/Kconfig b/drivers/soc/spacemit/Kconfig +new file mode 100644 +index 000000000000..a0c4b37209b0 +--- /dev/null ++++ 
b/drivers/soc/spacemit/Kconfig +@@ -0,0 +1,13 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++menu "Spacemit SoC drivers" ++ depends on SOC_SPACEMIT ++ ++config SPACEMIT_MEM_RANGE ++ bool "add memory range support for DMA device's." ++ help ++ This driver add memory range process for the devices whose address ++ space access is restricted, such as not full memory area supportted, ++ or address space is not same as cpu's. ++ ++endmenu +diff --git a/drivers/soc/spacemit/Makefile b/drivers/soc/spacemit/Makefile +new file mode 100644 +index 000000000000..97ce456fe993 +--- /dev/null ++++ b/drivers/soc/spacemit/Makefile +@@ -0,0 +1 @@ ++obj-$(CONFIG_SPACEMIT_MEM_RANGE) += spacemit-mem-range.o +diff --git a/drivers/soc/spacemit/spacemit-mem-range.c b/drivers/soc/spacemit/spacemit-mem-range.c +new file mode 100644 +index 000000000000..c34d4a14088f +--- /dev/null ++++ b/drivers/soc/spacemit/spacemit-mem-range.c +@@ -0,0 +1,39 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit memory range process ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static const struct of_device_id spacemit_mem_range_dt_match[] = { ++ { .compatible = "spacemit-dram-bus", }, ++ { }, ++}; ++ ++static int spacemit_mem_range_probe(struct platform_device *pdev) ++{ ++ return 0; ++} ++ ++static struct platform_driver spacemit_mem_range_driver = { ++ .probe = spacemit_mem_range_probe, ++ .driver = { ++ .name = "spacemit-mem-range", ++ .of_match_table = spacemit_mem_range_dt_match, ++ }, ++}; ++ ++static int __init spacemit_mem_range_drv_register(void) ++{ ++ return platform_driver_register(&spacemit_mem_range_driver); ++} ++ ++core_initcall(spacemit_mem_range_drv_register); diff --git a/drivers/soc/xuantie/Kconfig b/drivers/soc/xuantie/Kconfig new file mode 100644 index 000000000000..cf3f01d18fcc @@ -557840,7 +604211,7 @@ index 000000000000..bf30d17ce373 +/* This part must be outside protection */ +#include diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig -index e3217ce5a3f6..2ec45d7ba95a 100644 +index e3217ce5a3f6..cce525a1bb28 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -295,6 +295,12 @@ config SPI_DAVINCI @@ -557856,8 +604227,30 @@ index e3217ce5a3f6..2ec45d7ba95a 100644 config SPI_DESIGNWARE tristate "DesignWare SPI controller core support" imply SPI_MEM +@@ -974,6 +980,21 @@ config SPI_SN_F_OSPI + for connecting an SPI Flash memory over up to 8-bit wide bus. + It supports indirect access mode only. + ++config SPI_SPACEMIT_K1 ++ tristate "Spacemit K1 SPI Controller Platform Driver Support" ++ depends on SOC_SPACEMIT_K1X ++ help ++ This enables support for the SPI master controller in the Spacemit ++ k1 SOC. ++ ++config SPI_SPACEMIT_K1_QSPI ++ tristate "Spacemit K1 QuadSPI Controller Platform Driver Support" ++ depends on SOC_SPACEMIT_K1X && SPI_MEM ++ help ++ This enables support for the Spacemit K1 QuadSPI controller in master mode. ++ This controller does only support the high-level SPI memory interface ++ and not support generic SPI messages. 
++ + config SPI_SPRD + tristate "Spreadtrum SPI controller" + depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile -index d6fdb887d97d..796f764ff554 100644 +index d6fdb887d97d..1b37dfbc5f95 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o @@ -557868,6 +604261,15 @@ index d6fdb887d97d..796f764ff554 100644 obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o spi-dw-y := spi-dw-core.o spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o +@@ -130,6 +131,8 @@ obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o + obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o + obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o + obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o ++obj-$(CONFIG_SPI_SPACEMIT_K1) += spi-spacemit-k1.o ++obj-$(CONFIG_SPI_SPACEMIT_K1_QSPI) += spi-spacemit-k1-qspi.o + obj-$(CONFIG_SPI_SPRD) += spi-sprd.o + obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o + obj-$(CONFIG_SPI_STM32) += spi-stm32.o diff --git a/drivers/spi/spi-dw-mmio-quad.c b/drivers/spi/spi-dw-mmio-quad.c new file mode 100644 index 000000000000..b5e8cef263a6 @@ -558090,6 +604492,18 @@ index 000000000000..b5e8cef263a6 +MODULE_AUTHOR("linghui zeng "); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW ehance-spi Core"); +MODULE_LICENSE("GPL v2"); +diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c +index 805264c9c65c..2cd71545692c 100644 +--- a/drivers/spi/spi-dw-mmio.c ++++ b/drivers/spi/spi-dw-mmio.c +@@ -439,6 +439,7 @@ MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match); + #ifdef CONFIG_ACPI + static const struct acpi_device_id dw_spi_mmio_acpi_match[] = { + {"HISI0173", (kernel_ulong_t)dw_spi_pssi_init}, ++ {"SOPH0004", (kernel_ulong_t)dw_spi_pssi_init}, + {}, + }; + MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match); diff --git a/drivers/spi/spi-dw-quad.c b/drivers/spi/spi-dw-quad.c new file mode 100644 index 000000000000..8e815ecb135f @@ -559297,6 +605711,3158 @@ index 000000000000..87c12dbd40f9 +extern int dw_qspi_resume_host(struct dw_spi *dws); + +#endif /* DW_QSPI_HEADER_H */ +diff --git a/drivers/spi/spi-spacemit-k1-qspi.c b/drivers/spi/spi-spacemit-k1-qspi.c +new file mode 100644 +index 000000000000..073d0401484d +--- /dev/null ++++ b/drivers/spi/spi-spacemit-k1-qspi.c +@@ -0,0 +1,1572 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit k1 qspi controller driver ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++#define QSPI_WAIT_TIMEOUT (300) /* ms */ ++#define QSPI_AUTOSUSPEND_TIMEOUT 2000 ++#define K1_MPMU_ACGR 0xd4051024 ++ ++/* QSPI PMUap register */ ++#define PMUA_QSPI_CLK_RES_CTRL 0xd4282860 ++#define QSPI_CLK_SEL(x) ((x) << 6) ++#define QSPI_CLK_SEL_MASK GENMASK(8, 6) ++#define QSPI_CLK_EN BIT(4) ++#define QSPI_BUS_CLK_EN BIT(3) ++#define QSPI_CLK_RST BIT(1) ++#define QSPI_BUS_RST BIT(0) ++ ++/* QSPI memory base */ ++#define QSPI_AMBA_BASE 0xb8000000 ++#define QSPI_FLASH_A1_BASE QSPI_AMBA_BASE ++#define QSPI_FLASH_A1_TOP (QSPI_FLASH_A1_BASE + 0x4000000) ++#define QSPI_FLASH_A2_BASE QSPI_FLASH_A1_TOP ++#define QSPI_FLASH_A2_TOP (QSPI_FLASH_A2_BASE + 0x100000) ++#define QSPI_FLASH_B1_BASE QSPI_FLASH_A2_TOP ++#define QSPI_FLASH_B1_TOP (QSPI_FLASH_B1_BASE + 0x100000) ++#define QSPI_FLASH_B2_BASE QSPI_FLASH_B1_TOP ++#define QSPI_FLASH_B2_TOP (QSPI_FLASH_B2_BASE + 0x100000) ++ ++/* TX/RX/ABH buffer max */ ++#define QSPI_RX_BUFF_MAX SZ_128 ++#define QSPI_TX_BUFF_MAX SZ_256 ++#define QSPI_TX_BUFF_POP_MIN 16 ++#define QSPI_AHB_BUFF_MAX_SIZE SZ_512 ++#define QSPI_TX_DMA_BURST SZ_16 ++ ++#define QSPI_WAIT_BIT_CLEAR 0 ++#define QSPI_WAIT_BIT_SET 1 ++ ++/* QSPI Host Registers used by the driver */ ++#define QSPI_MCR 0x00 ++#define QSPI_MCR_ISD_MASK GENMASK(19, 16) ++#define QSPI_MCR_MDIS_MASK BIT(14) ++#define QSPI_MCR_CLR_TXF_MASK BIT(11) ++#define QSPI_MCR_CLR_RXF_MASK BIT(10) ++#define QSPI_MCR_DDR_EN_MASK BIT(7) ++#define QSPI_MCR_END_CFG_MASK GENMASK(3, 2) ++#define QSPI_MCR_SWRSTHD_MASK BIT(1) ++#define QSPI_MCR_SWRSTSD_MASK BIT(0) ++ ++#define QSPI_TCR 0x04 ++#define QSPI_IPCR 0x08 ++#define QSPI_IPCR_SEQID(x) ((x) << 24) ++ ++#define QSPI_FLSHCR 0x0c ++ ++#define QSPI_BUF0CR 0x10 ++#define QSPI_BUF1CR 0x14 ++#define QSPI_BUF2CR 0x18 ++#define QSPI_BUF3CR 0x1c ++#define QSPI_BUF3CR_ALLMST_MASK BIT(31) ++#define QSPI_BUF3CR_ADATSZ(x) ((x) << 8) ++#define QSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8) ++ ++#define QSPI_BFGENCR 0x20 ++#define QSPI_BFGENCR_SEQID(x) ((x) << 12) ++ ++#define QSPI_SOCCR 0x24 ++ ++#define QSPI_BUF0IND 0x30 ++#define QSPI_BUF1IND 0x34 ++#define QSPI_BUF2IND 0x38 ++ ++#define QSPI_SFAR 0x100 ++#define QSPI_SFACR 0x104 ++ ++#define QSPI_SMPR 0x108 ++#define QSPI_SMPR_DDRSMP_MASK GENMASK(18, 16) ++#define QSPI_SMPR_FSDLY_MASK BIT(6) ++#define QSPI_SMPR_FSPHS_MASK BIT(5) ++#define QSPI_SMPR_FSPHS_CLK (416000000) ++#define QSPI_SMPR_HSENA_MASK BIT(0) ++ ++#define QSPI_RBSR 0x10c ++ ++#define QSPI_RBCT 0x110 ++#define QSPI_RBCT_WMRK_MASK GENMASK(4, 0) ++#define QSPI_RBCT_RXBRD_MASK BIT(8) ++ ++#define QSPI_TBSR 0x150 ++#define QSPI_TBDR 0x154 ++#define QSPI_TBCT 0x158 ++#define QSPI_TX_WMRK (QSPI_TX_DMA_BURST / 4 - 1) ++ ++#define QSPI_SR 0x15c ++#define QSPI_SR_BUSY BIT(0) ++#define QSPI_SR_IP_ACC_MASK BIT(1) ++#define QSPI_SR_AHB_ACC_MASK BIT(2) ++#define QSPI_SR_TXFULL BIT(27) ++ ++#define QSPI_FR 0x160 ++#define QSPI_FR_TFF_MASK BIT(0) ++#define QSPI_FR_IPGEF BIT(4) ++#define QSPI_FR_IPIEF BIT(6) ++#define QSPI_FR_IPAEF BIT(7) ++#define QSPI_FR_IUEF BIT(11) ++#define QSPI_FR_ABOF BIT(12) ++#define QSPI_FR_AIBSEF BIT(13) ++#define QSPI_FR_AITEF BIT(14) ++#define QSPI_FR_ABSEF BIT(15) ++#define QSPI_FR_RBDF BIT(16) ++#define QSPI_FR_RBOF BIT(17) ++#define QSPI_FR_ILLINE BIT(23) ++#define QSPI_FR_TBUF BIT(26) 
++#define QSPI_FR_TBFF BIT(27) ++#define BUFFER_FR_FLAG (QSPI_FR_ABOF | QSPI_FR_RBOF | QSPI_FR_TBUF) ++ ++#define COMMAND_FR_FLAG (QSPI_FR_ABSEF | QSPI_FR_AITEF | \ ++ QSPI_FR_AIBSEF | QSPI_FR_IUEF | \ ++ QSPI_FR_IPAEF | QSPI_FR_IPIEF | \ ++ QSPI_FR_IPGEF) ++ ++#define QSPI_RSER 0x164 ++#define QSPI_RSER_TFIE BIT(0) ++#define QSPI_RSER_IPGEIE BIT(4) ++#define QSPI_RSER_IPIEIE BIT(6) ++#define QSPI_RSER_IPAEIE BIT(7) ++#define QSPI_RSER_IUEIE BIT(11) ++#define QSPI_RSER_ABOIE BIT(12) ++#define QSPI_RSER_AIBSIE BIT(13) ++#define QSPI_RSER_AITIE BIT(14) ++#define QSPI_RSER_ABSEIE BIT(15) ++#define QSPI_RSER_RBDIE BIT(16) ++#define QSPI_RSER_RBOIE BIT(17) ++#define QSPI_RSER_RBDDE BIT(21) ++#define QSPI_RSER_ILLINIE BIT(23) ++#define QSPI_RSER_TBFDE BIT(25) ++#define QSPI_RSER_TBUIE BIT(26) ++#define QSPI_RSER_TBFIE BIT(27) ++#define BUFFER_ERROR_INT (QSPI_RSER_ABOIE | QSPI_RSER_RBOIE | \ ++ QSPI_RSER_TBUIE) ++ ++#define COMMAND_ERROR_INT (QSPI_RSER_ABSEIE | QSPI_RSER_AITIE | \ ++ QSPI_RSER_AIBSIE | QSPI_RSER_IUEIE | \ ++ QSPI_RSER_IPAEIE | QSPI_RSER_IPIEIE | \ ++ QSPI_RSER_IPGEIE) ++ ++#define QSPI_SPNDST 0x168 ++#define QSPI_SPTRCLR 0x16c ++#define QSPI_SPTRCLR_IPPTRC BIT(8) ++#define QSPI_SPTRCLR_BFPTRC BIT(0) ++ ++#define QSPI_SFA1AD 0x180 ++#define QSPI_SFA2AD 0x184 ++#define QSPI_SFB1AD 0x188 ++#define QSPI_SFB2AD 0x18c ++#define QSPI_DLPR 0x190 ++#define QSPI_RBDR(x) (0x200 + ((x) * 4)) ++ ++#define QSPI_LUTKEY 0x300 ++#define QSPI_LUTKEY_VALUE 0x5af05af0 ++ ++#define QSPI_LCKCR 0x304 ++#define QSPI_LCKER_LOCK BIT(0) ++#define QSPI_LCKER_UNLOCK BIT(1) ++ ++#define QSPI_LUT_BASE 0x310 ++/* 16Bytes per sequence */ ++#define QSPI_LUT_REG(seqid, i) (QSPI_LUT_BASE + (seqid) * 16 + (i) * 4) ++ ++/* ++ * QSPI Sequence index. ++ * index 0 is preset at boot for AHB read, ++ * index 1 is used for other command. ++ */ ++#define SEQID_LUT_AHBREAD_ID 0 ++#define SEQID_LUT_SHARED_ID 1 ++ ++/* QSPI Instruction set for the LUT register */ ++#define LUT_INSTR_STOP 0 ++#define LUT_INSTR_CMD 1 ++#define LUT_INSTR_ADDR 2 ++#define LUT_INSTR_DUMMY 3 ++#define LUT_INSTR_MODE 4 ++#define LUT_INSTR_MODE2 5 ++#define LUT_INSTR_MODE4 6 ++#define LUT_INSTR_READ 7 ++#define LUT_INSTR_WRITE 8 ++#define LUT_INSTR_JMP_ON_CS 9 ++#define LUT_INSTR_ADDR_DDR 10 ++#define LUT_INSTR_MODE_DDR 11 ++#define LUT_INSTR_MODE2_DDR 12 ++#define LUT_INSTR_MODE4_DDR 13 ++#define LUT_INSTR_READ_DDR 14 ++#define LUT_INSTR_WRITE_DDR 15 ++#define LUT_INSTR_DATA_LEARN 16 ++ ++/* ++ * The PAD definitions for LUT register. ++ * ++ * The pad stands for the number of IO lines [0:3]. ++ * For example, the quad read needs four IO lines, ++ * so you should use LUT_PAD(4). ++ */ ++#define LUT_PAD(x) (fls(x) - 1) ++ ++/* ++ * One sequence must be consisted of 4 LUT enteries(16Bytes). 
++ * LUT entries with the following register layout: ++ * b'31 b'0 ++ * --------------------------------------------------------------------------- ++ * |INSTR1[15~10]|PAD1[9~8]|OPRND1[7~0] | INSTR0[15~10]|PAD0[9~8]|OPRND0[7~0]| ++ * --------------------------------------------------------------------------- ++ */ ++#define LUT_DEF(idx, ins, pad, opr) \ ++ ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) & 0x1) * 16)) ++ ++#define READ_FROM_CACHE_OP 0x03 ++#define READ_FROM_CACHE_OP_Fast 0x0b ++#define READ_FROM_CACHE_OP_X2 0x3b ++#define READ_FROM_CACHE_OP_X4 0x6b ++#define READ_FROM_CACHE_OP_DUALIO 0xbb ++#define READ_FROM_CACHE_OP_QUADIO 0xeb ++ ++u32 reg_offset_table[] = { ++ QSPI_MCR, QSPI_TCR, QSPI_IPCR, QSPI_FLSHCR, ++ QSPI_BUF0CR, QSPI_BUF1CR, QSPI_BUF2CR, QSPI_BUF3CR, ++ QSPI_BFGENCR, QSPI_SOCCR, QSPI_BUF0IND, QSPI_BUF1IND, ++ QSPI_BUF2IND, QSPI_SFAR, QSPI_SFACR, QSPI_SMPR, ++ QSPI_RBSR, QSPI_RBCT, QSPI_TBSR, QSPI_TBDR, ++ QSPI_TBCT, QSPI_SR, QSPI_FR, QSPI_RSER, ++ QSPI_SPNDST, QSPI_SPTRCLR, QSPI_SFA1AD, QSPI_SFA2AD, ++ QSPI_SFB1AD, QSPI_SFB2AD, QSPI_DLPR, QSPI_LUTKEY, ++ QSPI_LCKCR ++}; ++ ++/* k1 qspi host priv */ ++struct k1_qspi { ++ struct device *dev; ++ struct spi_controller *ctrl; ++ void __iomem *io_map; ++ phys_addr_t io_phys; ++ ++ void __iomem *ahb_map; ++ phys_addr_t memmap_base; ++ u32 memmap_size; ++ ++ u32 sfa1ad; ++ u32 sfa2ad; ++ u32 sfb1ad; ++ u32 sfb2ad; ++ ++ u32 pmuap_reg; ++ void __iomem *pmuap_addr; ++ u32 mpmu_acgr_reg; ++ void __iomem *mpmu_acgr; ++ ++ u32 rx_buf_size; ++ u32 tx_buf_size; ++ u32 ahb_buf_size; ++ u32 ahb_read_enable; ++ u32 tx_unit_size; ++ u32 rx_unit_size; ++ ++ u32 cmd_interrupt; ++ u32 fr_error_flag; ++ ++ u32 tx_dma_enable; ++ u32 tx_wmrk; ++ struct dma_chan *tx_dma; ++ struct dma_slave_config tx_dma_cfg; ++ ++ u32 rx_dma_enable; ++ struct dma_chan *rx_dma; ++ ++ struct sg_table sgt; ++ struct completion dma_completion; ++ ++ u32 cs_selected; ++ u32 max_hz; ++ u32 endian_xchg; ++ u32 dma_enable; ++ ++ struct clk *clk, *bus_clk; ++ struct reset_control *resets; ++ ++ struct completion cmd_completion; ++ struct mutex lock; ++ int selected; ++ ++ u32 tx_underrun_err; ++ u32 rx_overflow_err; ++ u32 ahb_overflow_err; ++}; ++ ++enum qpsi_cs { ++ QSPI_CS_A1 = 0, ++ QSPI_CS_A2, ++ QSPI_CS_B1, ++ QSPI_CS_B2, ++ QSPI_CS_MAX, ++}; ++#define QSPI_DEFAULT_CS (QSPI_CS_A1) ++ ++enum qpsi_mode { ++ QSPI_NORMAL_MODE = 0, ++ QSPI_DISABLE_MODE, ++ QSPI_STOP_MODE, ++}; ++ ++static ssize_t qspi_info_show(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct k1_qspi *t_qspi = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%s: rx_dma_en=%d, rx_buf_size=0x%x, tx_dma_en=%d,\n" ++ "tx_buf_size=0x%x, ahb_read_enable=%d, ahb_buf_size=0x%x\n", ++ dev_name(dev), t_qspi->rx_dma_enable, t_qspi->rx_buf_size, ++ t_qspi->tx_dma_enable, t_qspi->tx_buf_size, ++ t_qspi->ahb_read_enable, t_qspi->ahb_buf_size); ++} ++static DEVICE_ATTR_RO(qspi_info); ++ ++static ssize_t qspi_err_resp_show(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct k1_qspi *t_qspi = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%s: tx_underrun (%d), rx_overflow (%d), ahb_overflow (%d)\n", ++ dev_name(dev), t_qspi->tx_underrun_err, t_qspi->rx_overflow_err, ++ t_qspi->ahb_overflow_err); ++} ++static DEVICE_ATTR_RO(qspi_err_resp); ++ ++static struct attribute *qspi_dev_attrs[] = { ++ &dev_attr_qspi_info.attr, ++ &dev_attr_qspi_err_resp.attr, ++ NULL, ++}; ++ ++static struct attribute_group qspi_dev_group = { ++ .name = "qspi_dev", ++ .attrs = 
qspi_dev_attrs, ++}; ++ ++static void qspi_writel(struct k1_qspi *qspi, u32 val, void __iomem *addr) ++{ ++ if (qspi->endian_xchg) ++ iowrite32be(val, addr); ++ else ++ iowrite32(val, addr); ++} ++ ++static u32 qspi_readl(struct k1_qspi *qspi, void __iomem *addr) ++{ ++ if (qspi->endian_xchg) ++ return ioread32be(addr); ++ else ++ return ioread32(addr); ++} ++ ++static int qspi_set_func_clk(struct k1_qspi *qspi) ++{ ++ int ret = 0; ++ ++ qspi->clk = devm_clk_get(qspi->dev, "qspi_clk"); ++ if (IS_ERR_OR_NULL(qspi->clk)) { ++ dev_err(qspi->dev, "can not find the clock\n"); ++ return -EINVAL; ++ } ++ ++ qspi->bus_clk = devm_clk_get(qspi->dev, "qspi_bus_clk"); ++ if (IS_ERR_OR_NULL(qspi->bus_clk)) { ++ dev_err(qspi->dev, "can not find the bus clock\n"); ++ return -EINVAL; ++ } ++ ++ ret = clk_set_rate(qspi->clk, qspi->max_hz); ++ if (ret) { ++ dev_err(qspi->dev, "fail to set clk, ret:%d\n", ret); ++ return ret; ++ } ++ ++ ret = clk_prepare_enable(qspi->clk); ++ if (ret) { ++ dev_err(qspi->dev, "fail to enable clk, ret:%d\n", ret); ++ return ret; ++ } ++ ++ clk_prepare_enable(qspi->bus_clk); ++ ++ dev_dbg(qspi->dev, "bus clock %dHz, PMUap reg[0x%08x]:0x%08x\n", ++ qspi->max_hz, qspi->pmuap_reg, qspi_readl(qspi, qspi->pmuap_addr)); ++ ++ return 0; ++} ++ ++static void qspi_config_mfp(struct k1_qspi *qspi) ++{ ++ int cs = qspi->cs_selected; ++ ++ dev_info(qspi->dev, "config mfp for cs:[%d]\n", cs); ++} ++ ++static int k1_qspi_readl_poll_tout(struct k1_qspi *qspi, void __iomem *base, ++ u32 mask, u32 timeout_us, u8 wait_set) ++{ ++ u32 reg; ++ ++ if (qspi->endian_xchg) ++ mask = swab32(mask); ++ ++ if (wait_set) ++ return readl_poll_timeout(base, reg, (reg & mask), 10, timeout_us); ++ else ++ return readl_poll_timeout(base, reg, !(reg & mask), 10, timeout_us); ++} ++ ++static void qspi_reset(struct k1_qspi *qspi) ++{ ++ uint32_t reg; ++ int err; ++ ++ /* QSPI_SR[QSPI_SR_BUSY] must be 0 */ ++ err = k1_qspi_readl_poll_tout(qspi, qspi->io_map + QSPI_SR, ++ QSPI_SR_BUSY, QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR); ++ if (err) { ++ dev_err(qspi->dev, "failed to reset qspi host.\n"); ++ } else { ++ /* qspi softreset first */ ++ reg = qspi_readl(qspi, qspi->io_map + QSPI_MCR); ++ reg |= QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK; ++ qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR); ++ reg = qspi_readl(qspi, qspi->io_map + QSPI_MCR); ++ if ((reg & 0x3) != 0x3) ++ dev_info(qspi->dev, "reset ignored 0x%x.\n", reg); ++ ++ udelay(1); ++ reg &= ~(QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK); ++ qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR); ++ } ++} ++ ++ ++static void qspi_enter_mode(struct k1_qspi *qspi, uint32_t mode) ++{ ++ uint32_t mcr; ++ ++ mcr = qspi_readl(qspi, qspi->io_map + QSPI_MCR); ++ if (mode == QSPI_NORMAL_MODE) ++ mcr &= ~QSPI_MCR_MDIS_MASK; ++ else if (mode == QSPI_DISABLE_MODE) ++ mcr |= QSPI_MCR_MDIS_MASK; ++ qspi_writel(qspi, mcr, qspi->io_map + QSPI_MCR); ++} ++ ++static void qspi_write_sfar(struct k1_qspi *qspi, uint32_t val) ++{ ++ int err; ++ ++ /* QSPI_SR[IP_ACC] must be 0 */ ++ err = k1_qspi_readl_poll_tout(qspi, qspi->io_map + QSPI_SR, ++ QSPI_SR_IP_ACC_MASK, QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR); ++ if (err) ++ dev_err(qspi->dev, "failed to set QSPI_SFAR.\n"); ++ else ++ qspi_writel(qspi, val, qspi->io_map + QSPI_SFAR); ++} ++ ++/* ++ * IP Command Trigger could not be executed Error Flag may happen for write ++ * access to RBCT/SFAR register, need retry for these two register ++ */ ++static void qspi_write_rbct(struct k1_qspi *qspi, uint32_t val) ++{ ++ int err; ++ 
++ /* QSPI_SR[IP_ACC] must be 0 */ ++ err = k1_qspi_readl_poll_tout(qspi, qspi->io_map + QSPI_SR, ++ QSPI_SR_IP_ACC_MASK, QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR); ++ if (err) ++ dev_err(qspi->dev, "failed to set QSPI_RBCT.\n"); ++ else ++ qspi_writel(qspi, val, qspi->io_map + QSPI_RBCT); ++} ++ ++void qspi_init_ahbread(struct k1_qspi *qspi, int seq_id) ++{ ++ u32 buf_cfg = 0; ++ ++ /* Disable BUF0~BUF1, use BUF3 for all masters */ ++ qspi_writel(qspi, 0, qspi->io_map + QSPI_BUF0IND); ++ qspi_writel(qspi, 0, qspi->io_map + QSPI_BUF1IND); ++ qspi_writel(qspi, 0, qspi->io_map + QSPI_BUF2IND); ++ ++ buf_cfg = QSPI_BUF3CR_ALLMST_MASK | ++ QSPI_BUF3CR_ADATSZ((qspi->ahb_buf_size / 8)); ++ ++ /* AHB Master port */ ++ qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF0CR); ++ qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF1CR); ++ qspi_writel(qspi, 0xe, qspi->io_map + QSPI_BUF2CR); ++ qspi_writel(qspi, buf_cfg, qspi->io_map + QSPI_BUF3CR); // other masters ++ ++ /* set AHB read sequence id */ ++ qspi_writel(qspi, QSPI_BFGENCR_SEQID(seq_id), qspi->io_map + QSPI_BFGENCR); ++ dev_info(qspi->dev, "AHB buf size: %d\n", qspi->ahb_buf_size); ++} ++ ++/* ++ * If the slave device content being changed by Write/Erase, need to ++ * invalidate the AHB buffer. This can be achieved by doing the reset ++ * of controller after setting MCR0[SWRESET] bit. ++ */ ++static inline void k1_qspi_invalid(struct k1_qspi *qspi) ++{ ++ u32 reg; ++ ++ reg = qspi_readl(qspi, qspi->io_map + QSPI_MCR); ++ reg |= QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK; ++ qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR); ++ ++ /* ++ * The minimum delay : 1 AHB + 2 SFCK clocks. ++ * Delay 1 us is enough. ++ */ ++ udelay(1); ++ ++ reg &= ~(QSPI_MCR_SWRSTHD_MASK | QSPI_MCR_SWRSTSD_MASK); ++ qspi_writel(qspi, reg, qspi->io_map + QSPI_MCR); ++} ++ ++static void k1_qspi_prepare_lut(struct k1_qspi *qspi, ++ const struct spi_mem_op *op, u32 seq_id) ++{ ++ u32 lutval[4] = {0,}; ++ int lutidx = 0; ++ int i; ++ ++ /* qspi cmd */ ++ lutval[0] |= LUT_DEF(lutidx, LUT_INSTR_CMD, ++ LUT_PAD(op->cmd.buswidth), ++ op->cmd.opcode); ++ lutidx++; ++ ++ /* addr bytes */ ++ if (op->addr.nbytes) { ++ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_INSTR_ADDR, ++ LUT_PAD(op->addr.buswidth), ++ op->addr.nbytes * 8); ++ lutidx++; ++ } ++ ++ /* dummy bytes, if needed */ ++ if (op->dummy.nbytes) { ++ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_INSTR_DUMMY, ++ LUT_PAD(op->dummy.buswidth), ++ op->dummy.nbytes * 8 / ++ op->dummy.buswidth); ++ lutidx++; ++ } ++ ++ /* read/write data bytes */ ++ if (op->data.nbytes) { ++ lutval[lutidx / 2] |= LUT_DEF(lutidx, ++ op->data.dir == SPI_MEM_DATA_IN ? ++ LUT_INSTR_READ : LUT_INSTR_WRITE, ++ LUT_PAD(op->data.buswidth), ++ 0); ++ lutidx++; ++ } ++ ++ /* stop condition. 
*/ ++ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_INSTR_STOP, 0, 0); ++ ++ /* unlock LUT */ ++ qspi_writel(qspi, QSPI_LUTKEY_VALUE, qspi->io_map + QSPI_LUTKEY); ++ qspi_writel(qspi, QSPI_LCKER_UNLOCK, qspi->io_map + QSPI_LCKCR); ++ ++ /* fill LUT register */ ++ for (i = 0; i < ARRAY_SIZE(lutval); i++) ++ qspi_writel(qspi, lutval[i], qspi->io_map + QSPI_LUT_REG(seq_id, i)); ++ ++ /* lock LUT */ ++ qspi_writel(qspi, QSPI_LUTKEY_VALUE, qspi->io_map + QSPI_LUTKEY); ++ qspi_writel(qspi, QSPI_LCKER_LOCK, qspi->io_map + QSPI_LCKCR); ++ ++ dev_dbg(qspi->dev, "opcode:0x%x, lut_reg[0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x]\n", ++ op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]); ++} ++ ++static void k1_qspi_enable_interrupt(struct k1_qspi *qspi, u32 val) ++{ ++ u32 resr = 0; ++ ++ resr = qspi_readl(qspi, qspi->io_map + QSPI_RSER); ++ resr |= val; ++ qspi_writel(qspi, resr, qspi->io_map + QSPI_RSER); ++} ++ ++static void k1_qspi_disable_interrupt(struct k1_qspi *qspi, u32 val) ++{ ++ u32 resr = 0; ++ ++ resr = qspi_readl(qspi, qspi->io_map + QSPI_RSER); ++ resr &= ~val; ++ qspi_writel(qspi, resr, qspi->io_map + QSPI_RSER); ++} ++ ++static void k1_qspi_prepare_dma(struct k1_qspi *qspi) ++{ ++ struct dma_slave_config dma_cfg; ++ struct device *dev = qspi->dev; ++ dma_cap_mask_t mask; ++ ++ if (qspi->rx_dma_enable) { ++ /* RX DMA: DMA_MEMCPY type */ ++ dma_cap_zero(mask); ++ dma_cap_set(DMA_MEMCPY, mask); ++ qspi->rx_dma = dma_request_chan_by_mask(&mask); ++ if (IS_ERR_OR_NULL(qspi->rx_dma)) { ++ dev_err(dev, "rx dma request channel failed\n"); ++ qspi->rx_dma = NULL; ++ qspi->rx_dma_enable = 0; ++ } else { ++ dev_dbg(dev, "rx dma enable, channel:%d\n", qspi->rx_dma->chan_id); ++ } ++ } ++ ++ if (qspi->tx_dma_enable) { ++ /* TX DMA: DMA_SLAVE type */ ++ qspi->tx_dma = dma_request_slave_channel(dev, "tx-dma"); ++ if (qspi->tx_dma) { ++ memset(&dma_cfg, 0, sizeof(struct dma_slave_config)); ++ dma_cfg.direction = DMA_MEM_TO_DEV; ++ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ dma_cfg.dst_addr = qspi->io_phys + QSPI_TBDR - 4; ++ dma_cfg.dst_maxburst = QSPI_TX_DMA_BURST; ++ if (dmaengine_slave_config(qspi->tx_dma, &dma_cfg)) { ++ dev_err(dev, "tx dma slave config failed\n"); ++ dma_release_channel(qspi->tx_dma); ++ qspi->tx_dma = NULL; ++ qspi->tx_dma_enable = 0; ++ } else { ++ dev_dbg(dev, "tx dma enable, channel:%d\n", qspi->tx_dma->chan_id); ++ } ++ } else { ++ qspi->tx_dma_enable = 0; ++ } ++ } ++ ++ if (qspi->tx_dma || qspi->rx_dma) ++ init_completion(&qspi->dma_completion); ++} ++ ++static void k1_qspi_dma_callback(void *arg) ++{ ++ struct completion *dma_completion = arg; ++ ++ complete(dma_completion); ++} ++ ++int k1_qspi_tx_dma_exec(struct k1_qspi *qspi, ++ const struct spi_mem_op *op) ++{ ++ struct dma_async_tx_descriptor *desc; ++ enum dma_transfer_direction dma_dir; ++ dma_cookie_t cookie; ++ int err = 0; ++ ++ if (!virt_addr_valid(op->data.buf.in) || ++ spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &qspi->sgt)) { ++ dev_err(qspi->dev, "tx dma spi_controller_dma_map_mem_op_data error\n"); ++ return -EIO; ++ } ++ ++ dma_dir = DMA_MEM_TO_DEV; ++ desc = dmaengine_prep_slave_sg(qspi->tx_dma, qspi->sgt.sgl, qspi->sgt.nents, ++ dma_dir, DMA_PREP_INTERRUPT); ++ if (!desc) { ++ dev_err(qspi->dev, "tx dma dmaengine_prep_slave_sg error\n"); ++ err = -ENOMEM; ++ goto out; ++ } ++ ++ reinit_completion(&qspi->dma_completion); ++ desc->callback = k1_qspi_dma_callback; ++ desc->callback_param = &qspi->dma_completion; ++ ++ cookie = dmaengine_submit(desc); ++ err = dma_submit_error(cookie); ++ if 
(err) { ++ dev_err(qspi->dev, "tx dma dmaengine_submit error\n"); ++ goto out; ++ } ++ ++ dma_async_issue_pending(qspi->tx_dma); ++ ++ return 0; ++out: ++ spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &qspi->sgt); ++ return err; ++} ++ ++int k1_qspi_rx_dma_exec(struct k1_qspi *qspi, dma_addr_t dma_dst, ++ dma_addr_t dma_src, size_t len) ++{ ++ dma_cookie_t cookie; ++ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; ++ struct dma_async_tx_descriptor *desc; ++ int ret; ++ ++ desc = dmaengine_prep_dma_memcpy(qspi->rx_dma, dma_dst, dma_src, len, flags); ++ if (!desc) { ++ dev_err(qspi->dev, "dmaengine_prep_dma_memcpy error\n"); ++ return -EIO; ++ } ++ ++ reinit_completion(&qspi->dma_completion); ++ desc->callback = k1_qspi_dma_callback; ++ desc->callback_param = &qspi->dma_completion; ++ cookie = dmaengine_submit(desc); ++ ret = dma_submit_error(cookie); ++ if (ret) { ++ dev_err(qspi->dev, "dma_submit_error %d\n", cookie); ++ return -EIO; ++ } ++ ++ dma_async_issue_pending(qspi->rx_dma); ++ ret = wait_for_completion_timeout(&qspi->dma_completion, ++ msecs_to_jiffies(len)); ++ if (ret <= 0) { ++ dmaengine_terminate_sync(qspi->rx_dma); ++ dev_err(qspi->dev, "DMA wait_for_completion_timeout\n"); ++ return -ETIMEDOUT; ++ } ++ ++ return 0; ++} ++ ++static int k1_qspi_rx_dma_sg(struct k1_qspi *qspi, struct sg_table rx_sg, ++ loff_t from) ++{ ++ struct scatterlist *sg; ++ dma_addr_t dma_src = qspi->memmap_base + from; ++ dma_addr_t dma_dst; ++ int i, len, ret; ++ ++ for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) { ++ dma_dst = sg_dma_address(sg); ++ len = sg_dma_len(sg); ++ dev_dbg(qspi->dev, "rx dma, dst:0x%pad, src:0x%pad, len:%d\n", ++ &dma_dst, &dma_src, len); ++ ret = k1_qspi_rx_dma_exec(qspi, dma_dst, dma_src, len); ++ if (ret) ++ return ret; ++ dma_src += len; ++ } ++ ++ return 0; ++} ++ ++static int k1_qspi_ahb_read(struct k1_qspi *qspi, ++ const struct spi_mem_op *op) ++{ ++ int ret = 0; ++ u32 len = op->data.nbytes; ++ u32 from = op->addr.val; ++ struct sg_table sgt; ++ ++ /* Read out the data directly from the AHB buffer. 
*/ ++ dev_dbg(qspi->dev, "ahb read %d bytes from address:0x%llx\n", ++ len, (qspi->memmap_base + op->addr.val)); ++ if (from + len > qspi->memmap_size) ++ return -EOPNOTSUPP; ++ ++ /* firstly try the DMA */ ++ if (qspi->rx_dma_enable) { ++ if (virt_addr_valid(op->data.buf.in) && ++ !spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt)) { ++ ret = k1_qspi_rx_dma_sg(qspi, sgt, from); ++ spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt); ++ } else { ++ ret = -EIO; ++ dev_err(qspi->dev, "spi_controller_dma_map_mem_op_data error\n"); ++ } ++ ++ /* DMA completed */ ++ if (!ret) ++ return 0; ++ } ++ ++ if (qspi->rx_dma_enable && ret) ++ dev_dbg(qspi->dev, "rx dma read fallback to memcpy read.\n"); ++ ++ if (!qspi->rx_dma_enable || (qspi->rx_dma_enable && ret)) ++ memcpy(op->data.buf.in, (qspi->ahb_map + op->addr.val), len); ++ ++ return 0; ++} ++ ++static int k1_qspi_fill_txfifo(struct k1_qspi *qspi, ++ const struct spi_mem_op *op) ++{ ++ void __iomem *base = qspi->io_map; ++ int i; ++ u32 val; ++ u32 tbsr; ++ u32 wait_cnt; ++ ++ if (!qspi->tx_dma_enable || (op->data.nbytes % QSPI_TX_BUFF_POP_MIN)) { ++ qspi->tx_wmrk = 0; ++ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { ++ memcpy(&val, op->data.buf.out + i, 4); ++ qspi_writel(qspi, val, base + QSPI_TBDR); ++ } ++ ++ if (i < op->data.nbytes) { ++ memcpy(&val, op->data.buf.out + i, op->data.nbytes - i); ++ qspi_writel(qspi, val, base + QSPI_TBDR); ++ } ++ ++ /* ++ * There must be at least 128bit data available in TX FIFO ++ * for any pop operation otherwise QSPI_FR[TBUF] will be set ++ */ ++ for (i = op->data.nbytes; i < ALIGN_DOWN(op->data.nbytes + ++ (QSPI_TX_BUFF_POP_MIN - 1), QSPI_TX_BUFF_POP_MIN); i += 4) ++ qspi_writel(qspi, 0, base + QSPI_TBDR); ++ } else { ++ /* ++ * Note that the number of bytes per DMA loop is determined ++ * by thee size of the QSPI_TBCT[WMRK]. ++ * bytes per DMA loop = (QSPI_TBCT[WMRK] + 1) * 4. ++ * set QSPI_TX_WMRK as the TX watermark. ++ */ ++ qspi->tx_wmrk = QSPI_TX_WMRK; ++ qspi_writel(qspi, qspi->tx_wmrk, base + QSPI_TBCT); ++ ++ /* config DMA channel and start */ ++ if (k1_qspi_tx_dma_exec(qspi, op)) { ++ qspi->tx_wmrk = 0; ++ dev_err(qspi->dev, "failed to start tx dma\n"); ++ return -EIO; ++ } ++ /* enable DMA request */ ++ k1_qspi_enable_interrupt(qspi, QSPI_RSER_TBFDE); ++ ++ /* ++ * before trigger qspi to send data to external bus, TX buffer ++ * need to have some data, or underrun error may happen. ++ * DMA need some time to write data to TX buffer, so add ++ * a delay here for this requirement. 
++ */ ++ wait_cnt = 0; ++ tbsr = qspi_readl(qspi, base + QSPI_TBSR); ++ while (4 * (tbsr >> 16) < min_t(unsigned int, ++ qspi->tx_buf_size, op->data.nbytes)) { ++ udelay(1); ++ tbsr = qspi_readl(qspi, base + QSPI_TBSR); ++ if (wait_cnt++ >= 100) { ++ msleep(100); ++ tbsr = qspi_readl(qspi, base + QSPI_TBSR); ++ if (4 * (tbsr >> 16) < min_t(unsigned int, ++ qspi->tx_buf_size, op->data.nbytes)) { ++ dev_err(qspi->dev, "tx dma failed to fill txbuf\n"); ++ /* disable all interrupts */ ++ qspi_writel(qspi, 0, qspi->io_map + QSPI_RSER); ++ dmaengine_terminate_all(qspi->tx_dma); ++ spi_controller_dma_unmap_mem_op_data(qspi->ctrl, ++ op, &qspi->sgt); ++ qspi->tx_wmrk = 0; ++ ++ return -EIO; ++ } ++ ++ break; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++static void k1_qspi_read_rxfifo(struct k1_qspi *qspi, const struct spi_mem_op *op) ++{ ++ void __iomem *base = qspi->io_map; ++ int i; ++ u8 *buf = op->data.buf.in; ++ u32 val; ++ ++ dev_dbg(qspi->dev, "ip read %d bytes\n", op->data.nbytes); ++ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { ++ val = qspi_readl(qspi, base + QSPI_RBDR(i / 4)); ++ memcpy(buf + i, &val, 4); ++ } ++ ++ if (i < op->data.nbytes) { ++ val = qspi_readl(qspi, base + QSPI_RBDR(i / 4)); ++ memcpy(buf + i, &val, op->data.nbytes - i); ++ } ++} ++ ++static irqreturn_t k1_qspi_irq_handler(int irq, void *dev_id) ++{ ++ struct k1_qspi *qspi = dev_id; ++ u32 fr; ++ ++ /* disable all interrupts */ ++ qspi_writel(qspi, 0, qspi->io_map + QSPI_RSER); ++ ++ fr = qspi_readl(qspi, qspi->io_map + QSPI_FR); ++ dev_dbg(qspi->dev, "QSPI_FR:0x%08x\n", fr); ++ /* check QSPI_FR error flag */ ++ if (fr & (COMMAND_FR_FLAG | BUFFER_FR_FLAG)) { ++ qspi->fr_error_flag = fr & (COMMAND_FR_FLAG | BUFFER_FR_FLAG); ++ ++ if (fr & QSPI_FR_IPGEF) ++ dev_err(qspi->dev, "IP command trigger during AHB grant\n"); ++ if (fr & QSPI_FR_IPIEF) ++ dev_err(qspi->dev, "IP command trigger could not be executed\n"); ++ if (fr & QSPI_FR_IPAEF) ++ dev_err(qspi->dev, "IP command trigger during AHB access\n"); ++ if (fr & QSPI_FR_IUEF) ++ dev_err(qspi->dev, "IP command usage error\n"); ++ if (fr & QSPI_FR_AIBSEF) ++ dev_err(qspi->dev, "AHB illegal burst size error\n"); ++ if (fr & QSPI_FR_AITEF) ++ dev_err(qspi->dev, "AHB illegal trancaction error\n"); ++ if (fr & QSPI_FR_ABSEF) ++ dev_err(qspi->dev, "AHB sequence error\n"); ++ ++ if (fr & QSPI_FR_TBUF) { ++ /* disable TBFDE interrupt */ ++ k1_qspi_disable_interrupt(qspi, QSPI_RSER_TBFDE); ++ dev_err_ratelimited(qspi->dev, "TX buffer underrun\n"); ++ qspi->tx_underrun_err++; ++ } ++ if (fr & QSPI_FR_RBOF) { ++ dev_err(qspi->dev, "RX buffer overflow\n"); ++ qspi->rx_overflow_err++; ++ } ++ if (fr & QSPI_FR_ABOF) { ++ dev_err(qspi->dev, "AHB buffer overflow\n"); ++ qspi->ahb_overflow_err++; ++ } ++ } ++ ++ if (qspi->cmd_interrupt && (fr & (QSPI_FR_TFF_MASK | COMMAND_FR_FLAG | BUFFER_FR_FLAG))) ++ complete(&qspi->cmd_completion); ++ ++ return IRQ_HANDLED; ++} ++ ++static int k1_qspi_do_op(struct k1_qspi *qspi, const struct spi_mem_op *op) ++{ ++ void __iomem *base = qspi->io_map; ++ int err = 0; ++ u32 mcr; ++ ++ if (qspi->cmd_interrupt) { ++ k1_qspi_enable_interrupt(qspi, QSPI_RSER_TFIE | BUFFER_ERROR_INT | ++ COMMAND_ERROR_INT); ++ init_completion(&qspi->cmd_completion); ++ } ++ ++ /* trigger LUT */ ++ qspi_writel(qspi, op->data.nbytes | QSPI_IPCR_SEQID(SEQID_LUT_SHARED_ID), ++ base + QSPI_IPCR); ++ ++ /* wait for the transaction complete */ ++ if (qspi->cmd_interrupt) ++ wait_for_completion(&qspi->cmd_completion); ++ else ++ err = k1_qspi_readl_poll_tout(qspi, base + 
QSPI_FR, QSPI_FR_TFF_MASK, ++ QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_SET); ++ ++ if (err) { ++ dev_err(qspi->dev, "opcode:0x%x transaction abort, ret:%d, error flag:0x%08x\n", ++ op->cmd.opcode, err, qspi->fr_error_flag); ++ dev_err(qspi->dev, "pmuap[0x%08x]:0x%08x\n", qspi->pmuap_reg, ++ qspi_readl(qspi, qspi->pmuap_addr)); ++ dev_err(qspi->dev, "mpmu[0x%08x]:0x%08x\n", K1_MPMU_ACGR, ++ qspi_readl(qspi, qspi->mpmu_acgr)); ++ goto tx_dma_unmap; ++ } ++ ++ err = k1_qspi_readl_poll_tout(qspi, base + QSPI_SR, QSPI_SR_BUSY, ++ QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR); ++ if (err) { ++ dev_err(qspi->dev, "opcode:0x%x busy timeout, ret:%d\n", op->cmd.opcode, err); ++ goto tx_dma_unmap; ++ } ++ ++ /* read RX buffer for IP command read */ ++ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) ++ k1_qspi_read_rxfifo(qspi, op); ++ ++ if (qspi->fr_error_flag & QSPI_FR_TBUF) { ++ /* abort current dma transfer */ ++ if (qspi->tx_dma_enable) ++ dmaengine_terminate_all(qspi->tx_dma); ++ ++ /* clear TX buf */ ++ mcr = qspi_readl(qspi, qspi->io_map + QSPI_MCR); ++ mcr |= QSPI_MCR_CLR_TXF_MASK; ++ qspi_writel(qspi, mcr, qspi->io_map + QSPI_MCR); ++ ++ /* reduce tx unit size and retry */ ++ if (qspi->tx_dma_enable) ++ qspi->tx_unit_size = qspi->tx_buf_size; ++ ++ err = -EAGAIN; ++ } else { ++ if (qspi->tx_dma_enable) ++ qspi->tx_unit_size = qspi->tx_buf_size; ++ } ++ ++tx_dma_unmap: ++ if (qspi->tx_wmrk) { ++ /* disable TBFDE interrupt and dma unmap */ ++ k1_qspi_disable_interrupt(qspi, QSPI_RSER_TBFDE); ++ spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &qspi->sgt); ++ qspi->tx_wmrk = 0; ++ } ++ ++ return err; ++} ++ ++static void dump_spi_mem_op_info(struct k1_qspi *qspi, ++ const struct spi_mem_op *op) ++{ ++ dev_dbg(qspi->dev, "cmd.opcode:0x%x\n", op->cmd.opcode); ++ dev_dbg(qspi->dev, "cmd.buswidth:%d\n", op->cmd.buswidth); ++ dev_dbg(qspi->dev, "addr.nbytes:%d,\n", op->addr.nbytes); ++ dev_dbg(qspi->dev, "addr.buswidth:%d\n", op->addr.buswidth); ++ dev_dbg(qspi->dev, "addr.val:0x%llx\n", op->addr.val); ++ dev_dbg(qspi->dev, "dummy.nbytes:%d\n", op->dummy.nbytes); ++ dev_dbg(qspi->dev, "dummy.buswidth:%d\n", op->dummy.buswidth); ++ dev_dbg(qspi->dev, "%s data.nbytes:%d\n", ++ (op->data.dir == SPI_MEM_DATA_IN) ? 
"read" : "write", ++ op->data.nbytes); ++ dev_dbg(qspi->dev, "data.buswidth:%d\n", op->data.buswidth); ++ dev_dbg(qspi->dev, "data.buf:0x%p\n", op->data.buf.in); ++} ++ ++static int is_read_from_cache_opcode(u8 opcode) ++{ ++ int ret; ++ ++ ret = ((opcode == READ_FROM_CACHE_OP) || ++ (opcode == READ_FROM_CACHE_OP_Fast) || ++ (opcode == READ_FROM_CACHE_OP_X2) || ++ (opcode == READ_FROM_CACHE_OP_X4) || ++ (opcode == READ_FROM_CACHE_OP_DUALIO) || ++ (opcode == READ_FROM_CACHE_OP_QUADIO)); ++ ++ return ret; ++} ++ ++static int k1_qspi_check_buswidth(struct k1_qspi *qspi, u8 width) ++{ ++ switch (width) { ++ case 1: ++ case 2: ++ case 4: ++ return 0; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++static bool k1_qspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) ++{ ++ struct k1_qspi *qspi = spi_controller_get_devdata(mem->spi->master); ++ int ret; ++ ++ mutex_lock(&qspi->lock); ++ ret = k1_qspi_check_buswidth(qspi, op->cmd.buswidth); ++ ++ if (op->addr.nbytes) ++ ret |= k1_qspi_check_buswidth(qspi, op->addr.buswidth); ++ ++ if (op->dummy.nbytes) ++ ret |= k1_qspi_check_buswidth(qspi, op->dummy.buswidth); ++ ++ if (op->data.nbytes) ++ ret |= k1_qspi_check_buswidth(qspi, op->data.buswidth); ++ ++ if (ret) { ++ mutex_unlock(&qspi->lock); ++ return false; ++ } ++ ++ /* address bytes should be equal to or less than 4 bytes */ ++ if (op->addr.nbytes > 4) { ++ mutex_unlock(&qspi->lock); ++ return false; ++ } ++ ++ /* check controller TX/RX buffer limits and alignment */ ++ if (op->data.dir == SPI_MEM_DATA_IN && ++ (op->data.nbytes > qspi->rx_unit_size || ++ (op->data.nbytes > qspi->rx_buf_size - 4 && !IS_ALIGNED(op->data.nbytes, 4)))) { ++ mutex_unlock(&qspi->lock); ++ return false; ++ } ++ ++ if (op->data.dir == SPI_MEM_DATA_OUT && op->data.nbytes > qspi->tx_unit_size) { ++ mutex_unlock(&qspi->lock); ++ return false; ++ } ++ ++ /* ++ * If requested address value is greater than controller assigned ++ * memory mapped space, return error as it didn't fit in the range. 
++ */ ++ if (op->addr.val >= qspi->memmap_size) { ++ pr_err("%s: addr.val:%lld greater than the map size\n", ++ __func__, op->addr.val); ++ mutex_unlock(&qspi->lock); ++ return false; ++ } ++ ++ /* number of dummy clock cycles should be <= 64 cycles */ ++ if (op->dummy.buswidth && ++ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) { ++ mutex_unlock(&qspi->lock); ++ return false; ++ } ++ ++ mutex_unlock(&qspi->lock); ++ return true; ++} ++ ++static const char *k1_qspi_get_name(struct spi_mem *mem) ++{ ++ ++ struct k1_qspi *qspi = spi_master_get_devdata(mem->spi->master); ++ struct device *dev = qspi->dev; ++ const char *name; ++ ++ name = devm_kasprintf(dev, GFP_KERNEL, ++ "%s-%d", dev_name(dev), ++ mem->spi->chip_select); ++ ++ if (!name) { ++ dev_err(dev, "failed to get memory for custom flash name\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ return name; ++} ++ ++static int k1_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) ++{ ++ struct k1_qspi *qspi = spi_controller_get_devdata(mem->spi->master); ++ int err = 0; ++ u32 mask; ++ u32 reg; ++ void __iomem *base; ++ ++ base = qspi->io_map; ++ ++ mutex_lock(&qspi->lock); ++ ++ dump_spi_mem_op_info(qspi, op); ++ ++ /* wait for controller being ready */ ++ mask = QSPI_SR_BUSY | QSPI_SR_IP_ACC_MASK | QSPI_SR_AHB_ACC_MASK; ++ err = k1_qspi_readl_poll_tout(qspi, base + QSPI_SR, mask, ++ QSPI_WAIT_TIMEOUT*1000, QSPI_WAIT_BIT_CLEAR); ++ if (err) { ++ dev_err(qspi->dev, "controller not ready!\n"); ++ dev_err(qspi->dev, "pmuap[0x%08x]:0x%08x\n", qspi->pmuap_reg, ++ qspi_readl(qspi, qspi->pmuap_addr)); ++ dev_err(qspi->dev, "mpmu[0x%08x]:0x%08x\n", K1_MPMU_ACGR, ++ qspi_readl(qspi, qspi->mpmu_acgr)); ++ mutex_unlock(&qspi->lock); ++ return err; ++ } ++ ++ /* clear TX/RX buffer before transaction */ ++ reg = qspi_readl(qspi, base + QSPI_MCR); ++ reg |= QSPI_MCR_CLR_TXF_MASK | QSPI_MCR_CLR_RXF_MASK; ++ qspi_writel(qspi, reg, base + QSPI_MCR); ++ ++ /* ++ * reset the sequence pointers whenever the sequence ID is changed by ++ * updating the SEDID filed in QSPI_IPCR OR QSPI_BFGENCR. ++ */ ++ reg = qspi_readl(qspi, base + QSPI_SPTRCLR); ++ reg |= (QSPI_SPTRCLR_IPPTRC | QSPI_SPTRCLR_BFPTRC); ++ qspi_writel(qspi, reg, base + QSPI_SPTRCLR); ++ ++ /* set the flash address into the QSPI_SFAR */ ++ qspi_write_sfar(qspi, qspi->memmap_base + op->addr.val); ++ ++ /* clear QSPI_FR before trigger LUT command */ ++ reg = qspi_readl(qspi, base + QSPI_FR); ++ if (reg) ++ qspi_writel(qspi, reg, base + QSPI_FR); ++ qspi->fr_error_flag = 0; ++ ++ /* ++ * read page command 13h must be done by IP command. ++ * read from cache through the AHB bus by accessing the mapped memory. ++ * In all other cases we use IP commands to access the flash. ++ */ ++ if (op->data.nbytes > (qspi->rx_buf_size - 4) && ++ op->data.dir == SPI_MEM_DATA_IN && ++ qspi->ahb_read_enable && ++ is_read_from_cache_opcode(op->cmd.opcode)) { ++ k1_qspi_prepare_lut(qspi, op, SEQID_LUT_AHBREAD_ID); ++ err = k1_qspi_ahb_read(qspi, op); ++ } else { ++ /* IP command */ ++ k1_qspi_prepare_lut(qspi, op, SEQID_LUT_SHARED_ID); ++ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) ++ err = k1_qspi_fill_txfifo(qspi, op); ++ if (!err) ++ err = k1_qspi_do_op(qspi, op); ++ } ++ ++ /* invalidate the data in the AHB buffer. 
*/
++	k1_qspi_invalid(qspi);
++
++	mutex_unlock(&qspi->lock);
++
++	return err;
++}
++
++static int k1_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
++{
++	struct k1_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
++
++	mutex_lock(&qspi->lock);
++	if (op->data.dir == SPI_MEM_DATA_OUT) {
++		if (op->data.nbytes > qspi->tx_unit_size)
++			op->data.nbytes = qspi->tx_unit_size;
++	} else {
++		if (op->data.nbytes > qspi->rx_unit_size)
++			op->data.nbytes = qspi->rx_unit_size;
++		else if (op->data.nbytes > qspi->rx_buf_size - 4 &&
++			 !IS_ALIGNED(op->data.nbytes, 4))
++			op->data.nbytes = qspi->rx_buf_size - 4;
++	}
++	mutex_unlock(&qspi->lock);
++
++	return 0;
++}
++
++static int k1_qspi_host_init(struct k1_qspi *qspi)
++{
++	void __iomem *base = qspi->io_map;
++	u32 reg;
++
++	qspi->resets = devm_reset_control_array_get_optional_exclusive(qspi->dev);
++	if (IS_ERR(qspi->resets)) {
++		dev_err(qspi->dev, "Failed to get qspi's resets\n");
++		return PTR_ERR(qspi->resets);
++	}
++
++	/* config mfp */
++	qspi_config_mfp(qspi);
++
++	reset_control_assert(qspi->resets);
++	/* set PMUap */
++	qspi_set_func_clk(qspi);
++	reset_control_deassert(qspi->resets);
++
++	/* reset qspi */
++	qspi_reset(qspi);
++
++	/* clock settings */
++	qspi_enter_mode(qspi, QSPI_DISABLE_MODE);
++
++	/* sampled by sfif_clk_b; half cycle delay; */
++	if (qspi->max_hz < (QSPI_SMPR_FSPHS_CLK >> 2))
++		qspi_writel(qspi, 0x0, base + QSPI_SMPR);
++	else
++		qspi_writel(qspi, QSPI_SMPR_FSPHS_MASK, base + QSPI_SMPR);
++
++	/* Fix write failure issue */
++	qspi_writel(qspi, 0x8, base + QSPI_SOCCR);
++
++	/* set the default source address QSPI_AMBA_BASE */
++	qspi_write_sfar(qspi, qspi->memmap_base);
++	qspi_writel(qspi, 0x0, base + QSPI_SFACR);
++
++	/* config ahb read */
++	qspi_init_ahbread(qspi, SEQID_LUT_AHBREAD_ID);
++
++	/* set flash memory map */
++	qspi_writel(qspi, qspi->sfa1ad & 0xfffffc00, base + QSPI_SFA1AD);
++	qspi_writel(qspi, qspi->sfa2ad & 0xfffffc00, base + QSPI_SFA2AD);
++	qspi_writel(qspi, qspi->sfb1ad & 0xfffffc00, base + QSPI_SFB1AD);
++	qspi_writel(qspi, qspi->sfb2ad & 0xfffffc00, base + QSPI_SFB2AD);
++
++	/* ISD3FB, ISD2FB, ISD3FA, ISD2FA = 1; END_CFG=0x3 */
++	reg = qspi_readl(qspi, base + QSPI_MCR);
++	reg |= QSPI_MCR_END_CFG_MASK | QSPI_MCR_ISD_MASK;
++	qspi_writel(qspi, reg, base + QSPI_MCR);
++
++	/* Module enabled */
++	qspi_enter_mode(qspi, QSPI_NORMAL_MODE);
++
++	/* Read using the IP Bus registers QSPI_RBDR0 to QSPI_RBDR31 */
++	qspi_write_rbct(qspi, QSPI_RBCT_RXBRD_MASK);
++
++	/* clear all interrupt status */
++	qspi_writel(qspi, 0xffffffff, base + QSPI_FR);
++
++	dev_dbg(qspi->dev, "qspi host init done.\n");
++
++	return 0;
++}
++
++static const struct spi_controller_mem_ops k1_qspi_mem_ops = {
++	.adjust_op_size = k1_qspi_adjust_op_size,
++	.supports_op = k1_qspi_supports_op,
++	.exec_op = k1_qspi_exec_op,
++	.get_name = k1_qspi_get_name,
++};
++
++static int k1_qspi_probe(struct platform_device *pdev)
++{
++	struct spi_controller *ctlr;
++	struct device *dev = &pdev->dev;
++	struct device_node *np = dev->of_node;
++	struct k1_qspi *qspi;
++	struct resource *res;
++
++	int ret = 0;
++	u32 qspi_bus_num = 0;
++	int host_irq = 0;
++
++	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct k1_qspi));
++	if (!ctlr)
++		return -ENOMEM;
++
++	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
++	qspi = spi_controller_get_devdata(ctlr);
++	qspi->dev = dev;
++	qspi->ctrl = ctlr;
++
++	platform_set_drvdata(pdev, qspi);
++
++	/* get qspi frequency */
++	if
(of_property_read_u32(dev->of_node, "k1,qspi-freq", &qspi->max_hz)) { ++ dev_err(dev, "failed to get qspi frequency\n"); ++ goto err_put_ctrl; ++ } ++ ++ /* get qspi register base address */ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi-base"); ++ qspi->io_map = devm_ioremap_resource(dev, res); ++ if (IS_ERR(qspi->io_map)) { ++ ret = PTR_ERR(qspi->io_map); ++ goto err_put_ctrl; ++ } ++ qspi->io_phys = res->start; ++ ++ /* get qspi memory-map address */ ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi-mmap"); ++ qspi->ahb_map = devm_ioremap_resource(dev, res); ++ if (IS_ERR(qspi->ahb_map)) { ++ ret = PTR_ERR(qspi->ahb_map); ++ goto err_put_ctrl; ++ } ++ ++ qspi->memmap_base = res->start; ++ qspi->memmap_size = resource_size(res); ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-sfa1ad", &qspi->sfa1ad)) ++ qspi->sfa1ad = QSPI_FLASH_A1_TOP; ++ else ++ qspi->sfa1ad += qspi->memmap_base; ++ if (of_property_read_u32(dev->of_node, "k1,qspi-sfa2ad", &qspi->sfa2ad)) ++ qspi->sfa2ad = QSPI_FLASH_A2_TOP; ++ else ++ qspi->sfa2ad += qspi->sfa1ad; ++ if (of_property_read_u32(dev->of_node, "k1,qspi-sfb1ad", &qspi->sfb1ad)) ++ qspi->sfb1ad = QSPI_FLASH_B1_TOP; ++ else ++ qspi->sfb1ad = qspi->sfa2ad; ++ if (of_property_read_u32(dev->of_node, "k1,qspi-sfb2ad", &qspi->sfb2ad)) ++ qspi->sfb2ad = QSPI_FLASH_B2_TOP; ++ else ++ qspi->sfb2ad += qspi->sfb1ad; ++ ++ dev_dbg(dev, "%s:memmap base:0x%pa, memmap size:0x%x\n", ++ __func__, &qspi->memmap_base, qspi->memmap_size); ++ ++ host_irq = platform_get_irq(pdev, 0); ++ if (host_irq < 0) { ++ dev_err(dev, "invalid host irq:%d\n", host_irq); ++ goto err_put_ctrl; ++ } ++ ret = devm_request_irq(dev, host_irq, k1_qspi_irq_handler, ++ 0, pdev->name, qspi); ++ if (ret) { ++ dev_err(dev, "failed to request irq:%d\n", ret); ++ goto err_put_ctrl; ++ } ++ init_completion(&qspi->cmd_completion); ++ dev_dbg(qspi->dev, "%s: host_irq:%d\n", __func__, host_irq); ++ ++ /* map QSPI PMUap register address */ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-pmuap-reg", &qspi->pmuap_reg)) ++ qspi->pmuap_reg = PMUA_QSPI_CLK_RES_CTRL; ++ qspi->pmuap_addr = ioremap(qspi->pmuap_reg, 4); ++ ++ /* map QSPI MPMU ACGR register address */ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-mpmu-acgr-reg", &qspi->mpmu_acgr_reg)) ++ qspi->mpmu_acgr_reg = K1_MPMU_ACGR; ++ qspi->mpmu_acgr = ioremap(qspi->mpmu_acgr_reg, 4); ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-rx-buf", &qspi->rx_buf_size)) ++ qspi->rx_buf_size = QSPI_RX_BUFF_MAX; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-tx-buf", &qspi->tx_buf_size)) ++ qspi->tx_buf_size = QSPI_TX_BUFF_MAX; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-ahb-buf", &qspi->ahb_buf_size)) ++ qspi->ahb_buf_size = QSPI_AHB_BUFF_MAX_SIZE; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-ahb-enable", &qspi->ahb_read_enable)) ++ qspi->ahb_read_enable = 1; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-interrupt", &qspi->cmd_interrupt)) ++ qspi->cmd_interrupt = 1; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-endian-xchg", &qspi->endian_xchg)) ++ qspi->endian_xchg = 0; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-cs", &qspi->cs_selected)) ++ qspi->cs_selected = QSPI_DEFAULT_CS; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-tx-dma", &qspi->tx_dma_enable)) ++ qspi->tx_dma_enable = 0; ++ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-rx-dma", &qspi->rx_dma_enable)) ++ qspi->rx_dma_enable = 0; ++ ++ k1_qspi_prepare_dma(qspi); ++ mutex_init(&qspi->lock); ++ ++ 
/* set the qspi device default index */ ++ if (of_property_read_u32(dev->of_node, "k1,qspi-id", &qspi_bus_num)) ++ ctlr->bus_num = 0; ++ else ++ ctlr->bus_num = qspi_bus_num; ++ ctlr->num_chipselect = 1; ++ ctlr->mem_ops = &k1_qspi_mem_ops; ++ ++ dev_dbg(dev, "%s: rx_buf_size:%d, tx_buf_size:%d\n", ++ __func__, qspi->rx_buf_size, qspi->tx_buf_size); ++ dev_dbg(dev, "%s: ahb_buf_size:%d, ahb_read:%d\n", ++ __func__, qspi->ahb_buf_size, qspi->ahb_read_enable); ++ ++ if (qspi->tx_dma_enable) ++ qspi->tx_unit_size = qspi->tx_buf_size; ++ else ++ qspi->tx_unit_size = qspi->tx_buf_size; ++ ++ if (qspi->ahb_read_enable) ++ qspi->rx_unit_size = SZ_4K; ++ else ++ qspi->rx_unit_size = qspi->rx_buf_size; ++ k1_qspi_host_init(qspi); ++ ++ ctlr->dev.of_node = np; ++ ctlr->dev.parent = &pdev->dev; ++ ctlr->use_gpio_descriptors = true; ++ ret = spi_register_controller(ctlr); ++ if (ret) ++ goto err_destroy_mutex; ++ ++#ifdef CONFIG_SYSFS ++ ret = sysfs_create_group(&(pdev->dev.kobj), ++ (const struct attribute_group *)(&qspi_dev_group)); ++ if (ret) { ++ dev_err(dev, "failed to create attr group for qspi dev!\n"); ++ goto err_destroy_mutex; ++ } ++#endif ++ ++ return 0; ++ ++err_destroy_mutex: ++ mutex_destroy(&qspi->lock); ++ iounmap(qspi->pmuap_addr); ++ ++err_put_ctrl: ++ spi_controller_put(ctlr); ++ ++ dev_err(dev, "K1 QSPI probe failed\n"); ++ return ret; ++} ++ ++static int k1_qspi_remove(struct platform_device *pdev) ++{ ++ struct k1_qspi *qspi = platform_get_drvdata(pdev); ++ ++ /* set disable mode */ ++ qspi_writel(qspi, QSPI_MCR_MDIS_MASK, qspi->io_map + QSPI_MCR); ++ qspi_writel(qspi, 0x0, qspi->io_map + QSPI_RSER); ++ ++ if (qspi->tx_dma) ++ dma_release_channel(qspi->tx_dma); ++ if (qspi->rx_dma) ++ dma_release_channel(qspi->rx_dma); ++ ++ mutex_destroy(&qspi->lock); ++ iounmap(qspi->pmuap_addr); ++ ++ reset_control_assert(qspi->resets); ++ clk_disable_unprepare(qspi->clk); ++ clk_disable_unprepare(qspi->bus_clk); ++ ++#ifdef CONFIG_SYSFS ++ sysfs_remove_group(&(pdev->dev.kobj), ++ (const struct attribute_group *)(&qspi_dev_group)); ++#endif ++ return 0; ++} ++ ++static const struct of_device_id k1_qspi_dt_ids[] = { ++ { .compatible = "spacemit,k1-qspi", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, k1_qspi_dt_ids); ++ ++static struct platform_driver k1_qspi_driver = { ++ .driver = { ++ .name = "k1-qspi", ++ .of_match_table = k1_qspi_dt_ids, ++ }, ++ .probe = k1_qspi_probe, ++ .remove = k1_qspi_remove, ++}; ++module_platform_driver(k1_qspi_driver); ++ ++MODULE_DESCRIPTION("Spacemit k1 qspi controller driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/spi/spi-spacemit-k1.c b/drivers/spi/spi-spacemit-k1.c +new file mode 100644 +index 000000000000..7c294b123738 +--- /dev/null ++++ b/drivers/spi/spi-spacemit-k1.c +@@ -0,0 +1,1281 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Support for Spacemit k1 spi controller ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "spi-spacemit-k1.h" ++ ++#define TIMOUT_DFLT 3000 ++#define TIMOUT_DFLT_SLAVE 0x40000 ++ ++ ++static bool k1_spi_txfifo_full(const struct spi_driver_data *drv_data) ++{ ++ return !(k1_spi_read(drv_data, STATUS) & STATUS_TNF); ++} ++ ++static u32 k1_configure_topctrl(const struct spi_driver_data *drv_data, u8 bits) ++{ ++ /* ++ * set Motorola Frame Format ++ * set DSS ++ */ ++ return TOP_FRF_Motorola | TOP_DSS(bits); ++} ++ ++static void cs_assert(struct spi_driver_data *drv_data) ++{ ++ struct chip_data *chip = drv_data->cur_chip; ++ ++ if (chip->cs_control) { ++ chip->cs_control(K1_SPI_CS_ASSERT); ++ return; ++ } ++ ++ if (gpio_is_valid(chip->gpio_cs)) { ++ gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); ++ return; ++ } ++} ++ ++static void cs_deassert(struct spi_driver_data *drv_data) ++{ ++ struct chip_data *chip = drv_data->cur_chip; ++ ++ if (chip->cs_control) { ++ chip->cs_control(K1_SPI_CS_DEASSERT); ++ return; ++ } ++ ++ if (gpio_is_valid(chip->gpio_cs)) { ++ gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); ++ return; ++ } ++} ++ ++/* clear all rx fifo useless data */ ++int k1_spi_flush(struct spi_driver_data *drv_data) ++{ ++ unsigned long limit = loops_per_jiffy << 1; ++ ++ do { ++ while (k1_spi_read(drv_data, STATUS) & STATUS_RNE) ++ k1_spi_read(drv_data, DATAR); ++ } while ((k1_spi_read(drv_data, STATUS) & STATUS_BSY) && --limit); ++ k1_spi_write(drv_data, STATUS, STATUS_ROR); ++ ++ return limit; ++} ++ ++static int null_writer(struct spi_driver_data *drv_data) ++{ ++ u8 n_bytes = drv_data->n_bytes; ++ ++ if (k1_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) ++ return 0; ++ ++ k1_spi_write(drv_data, DATAR, 0); ++ drv_data->tx += n_bytes; ++ ++ return 1; ++} ++ ++static int null_reader(struct spi_driver_data *drv_data) ++{ ++ u8 n_bytes = drv_data->n_bytes; ++ ++ while ((k1_spi_read(drv_data, STATUS) & STATUS_RNE) && ++ (drv_data->rx < drv_data->rx_end)) { ++ k1_spi_read(drv_data, DATAR); ++ drv_data->rx += n_bytes; ++ } ++ ++ return drv_data->rx == drv_data->rx_end; ++} ++ ++static int u8_writer(struct spi_driver_data *drv_data) ++{ ++ if (k1_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) ++ return 0; ++ ++ k1_spi_write(drv_data, DATAR, *(u8 *)(drv_data->tx)); ++ ++drv_data->tx; ++ ++ return 1; ++} ++ ++static int u8_reader(struct spi_driver_data *drv_data) ++{ ++ while ((k1_spi_read(drv_data, STATUS) & STATUS_RNE) && ++ (drv_data->rx < drv_data->rx_end)) { ++ *(u8 *)(drv_data->rx) = k1_spi_read(drv_data, DATAR); ++ ++drv_data->rx; ++ } ++ ++ return drv_data->rx == drv_data->rx_end; ++} ++ ++static int u16_writer(struct spi_driver_data *drv_data) ++{ ++ if (k1_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) ++ return 0; ++ ++ k1_spi_write(drv_data, DATAR, *(u16 *)(drv_data->tx)); ++ drv_data->tx += 2; ++ ++ return 1; ++} ++ ++static int u16_reader(struct spi_driver_data *drv_data) ++{ ++ while ((k1_spi_read(drv_data, STATUS) & STATUS_RNE) && ++ (drv_data->rx < drv_data->rx_end)) { ++ *(u16 *)(drv_data->rx) = k1_spi_read(drv_data, DATAR); ++ drv_data->rx += 2; ++ } ++ ++ return drv_data->rx == drv_data->rx_end; ++} ++ ++static int u32_writer(struct spi_driver_data *drv_data) ++{ ++ if (k1_spi_txfifo_full(drv_data) || (drv_data->tx == drv_data->tx_end)) ++ return 0; ++ 
++ k1_spi_write(drv_data, DATAR, *(u32 *)(drv_data->tx)); ++ drv_data->tx += 4; ++ ++ return 1; ++} ++ ++static int u32_reader(struct spi_driver_data *drv_data) ++{ ++ while ((k1_spi_read(drv_data, STATUS) & STATUS_RNE) && ++ (drv_data->rx < drv_data->rx_end)) { ++ *(u32 *)(drv_data->rx) = k1_spi_read(drv_data, DATAR); ++ drv_data->rx += 4; ++ } ++ ++ return drv_data->rx == drv_data->rx_end; ++} ++ ++void *k1_spi_next_transfer(struct spi_driver_data *drv_data) ++{ ++ struct spi_message *msg = drv_data->cur_msg; ++ struct spi_transfer *trans = drv_data->cur_transfer; ++ ++ /* Move to next transfer */ ++ if (trans->transfer_list.next != &msg->transfers) { ++ drv_data->cur_transfer = list_entry(trans->transfer_list.next, ++ struct spi_transfer, ++ transfer_list); ++ return RUNNING_STATE; ++ } else ++ return DONE_STATE; ++} ++ ++/* caller already set message->status; dma and pio irqs are blocked */ ++static void giveback(struct spi_driver_data *drv_data) ++{ ++ struct spi_transfer *last_transfer; ++ struct spi_message *msg; ++ ++ msg = drv_data->cur_msg; ++ drv_data->cur_msg = NULL; ++ drv_data->cur_transfer = NULL; ++ ++ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer, transfer_list); ++ ++ /* Delay if requested before any change in chip select */ ++ spi_transfer_delay_exec(last_transfer); ++ ++ /* Drop chip select UNLESS cs_change is true or we are returning ++ * a message with an error, or next message is for another chip ++ */ ++ if (!last_transfer->cs_change) ++ cs_deassert(drv_data); ++ else { ++ struct spi_message *next_msg; ++ ++ /* Holding of cs was hinted, but we need to make sure ++ * the next message is for the same chip. Don't waste ++ * time with the following tests unless this was hinted. ++ * ++ * We cannot postpone this until pump_messages, because ++ * after calling msg->complete (below) the driver that ++ * sent the current message could be unloaded, which ++ * could invalidate the cs_control() callback... 
++ */ ++ ++ /* get a pointer to the next message, if any */ ++ next_msg = spi_get_next_queued_message(drv_data->master); ++ ++ /* see if the next and current messages point ++ * to the same chip ++ */ ++ if (next_msg && next_msg->spi != msg->spi) ++ next_msg = NULL; ++ if (!next_msg || msg->state == ERROR_STATE) ++ cs_deassert(drv_data); ++ } ++ ++ drv_data->cur_chip = NULL; ++ spi_finalize_current_message(drv_data->master); ++ ++ complete(&drv_data->cur_msg_completion); ++} ++ ++static void reset_fifo_ctrl(struct spi_driver_data *drv_data) ++{ ++ struct chip_data *chip = drv_data->cur_chip; ++ u32 fifo_ctrl = 0; ++ ++ fifo_ctrl |= chip->threshold; ++ k1_spi_write(drv_data, FIFO_CTRL, fifo_ctrl); ++} ++ ++static void reset_int_en(struct spi_driver_data *drv_data) ++{ ++ u32 int_en = 0; ++ ++ int_en = k1_spi_read(drv_data, INT_EN); ++ int_en &= ~drv_data->int_cr; ++ k1_spi_write(drv_data, INT_EN, int_en); ++} ++ ++static void int_error_stop(struct spi_driver_data *drv_data, const char *msg) ++{ ++ /* Stop and reset spi controller */ ++ k1_spi_write(drv_data, STATUS, drv_data->clear_sr); ++ reset_fifo_ctrl(drv_data); ++ reset_int_en(drv_data); ++ k1_spi_write(drv_data, TO, 0); ++ k1_spi_flush(drv_data); ++ k1_spi_write(drv_data, TOP_CTRL, k1_spi_read(drv_data, TOP_CTRL) & ++ ~(TOP_SSE | TOP_HOLD_FRAME_LOW)); ++ dev_err(&drv_data->pdev->dev, "%s\n", msg); ++ ++ drv_data->cur_msg->state = ERROR_STATE; ++ queue_work(system_wq, &drv_data->pump_transfers); ++} ++ ++static void int_transfer_complete(struct spi_driver_data *drv_data) ++{ ++ /* Stop spi controller */ ++ k1_spi_write(drv_data, STATUS, drv_data->clear_sr); ++ reset_fifo_ctrl(drv_data); ++ reset_int_en(drv_data); ++ k1_spi_write(drv_data, TO, 0); ++ ++ /* Update total byte transferred return count actual bytes read */ ++ drv_data->cur_msg->actual_length += drv_data->len - (drv_data->rx_end - drv_data->rx); ++ ++ /* Move to next transfer */ ++ drv_data->cur_msg->state = k1_spi_next_transfer(drv_data); ++ ++ /* Schedule transfer tasklet */ ++ queue_work(system_wq, &drv_data->pump_transfers); ++} ++ ++static irqreturn_t interrupt_transfer(struct spi_driver_data *drv_data) ++{ ++ u32 irq_mask = (k1_spi_read(drv_data, INT_EN) & INT_EN_TIE) ? ++ drv_data->mask_sr : drv_data->mask_sr & ~STATUS_TFS; ++ ++ u32 irq_status = k1_spi_read(drv_data, STATUS) & irq_mask; ++ ++ if (irq_status & STATUS_ROR) { ++ int_error_stop(drv_data, "fifo overrun"); ++ return IRQ_HANDLED; ++ } ++ ++ if (irq_status & STATUS_TINT) { ++ k1_spi_write(drv_data, STATUS, STATUS_TINT); ++ if (drv_data->read(drv_data)) { ++ int_transfer_complete(drv_data); ++ return IRQ_HANDLED; ++ } ++ } ++ ++ /* Drain rx fifo, Fill tx fifo and prevent overruns */ ++ do { ++ if (drv_data->read(drv_data)) { ++ int_transfer_complete(drv_data); ++ return IRQ_HANDLED; ++ } ++ } while (drv_data->write(drv_data)); ++ ++ if (drv_data->read(drv_data)) { ++ int_transfer_complete(drv_data); ++ return IRQ_HANDLED; ++ } ++ ++ if (drv_data->tx == drv_data->tx_end) { ++ u32 int_en; ++ ++ int_en = k1_spi_read(drv_data, INT_EN); ++ int_en &= ~INT_EN_TIE; ++ ++ k1_spi_write(drv_data, INT_EN, int_en); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t spi_int(int irq, void *dev_id) ++{ ++ struct spi_driver_data *drv_data = dev_id; ++ u32 int_en; ++ u32 mask = drv_data->mask_sr; ++ u32 int_status; ++ ++ /* ++ * The IRQ might be shared with other peripherals so we must first ++ * check that are we RPM suspended or not. 
If we are we assume that ++ * the IRQ was not for us (we shouldn't be RPM suspended when the ++ * interrupt is enabled). ++ */ ++ if (pm_runtime_suspended(&drv_data->pdev->dev)) ++ return IRQ_NONE; ++ ++ /* ++ * If the device is not yet in RPM suspended state and we get an ++ * interrupt that is meant for another device, check if status bits ++ * are all set to one. That means that the device is already ++ * powered off. ++ */ ++ int_status = k1_spi_read(drv_data, STATUS); ++ if (int_status == ~0) ++ return IRQ_NONE; ++ ++ int_en = k1_spi_read(drv_data, INT_EN); ++ ++ /* Ignore possible writes if we don't need to write */ ++ if (!(int_en & INT_EN_TIE)) ++ mask &= ~STATUS_TFS; ++ ++ /* Ignore RX timeout interrupt if it is disabled */ ++ if (!(int_en & INT_EN_TINTE)) ++ mask &= ~STATUS_TINT; ++ ++ if (!(int_status & mask)) ++ return IRQ_NONE; ++ ++ if (!drv_data->cur_msg) { ++ k1_spi_write(drv_data, TOP_CTRL, k1_spi_read(drv_data, TOP_CTRL) & ++ ~(TOP_SSE | TOP_HOLD_FRAME_LOW)); ++ k1_spi_write(drv_data, INT_EN, k1_spi_read(drv_data, INT_EN) & ++ ~drv_data->int_cr); ++ k1_spi_write(drv_data, TO, 0); ++ k1_spi_write(drv_data, STATUS, drv_data->clear_sr); ++ ++ dev_err(&drv_data->pdev->dev, "bad message state in interrupt handler\n"); ++ ++ return IRQ_HANDLED; ++ } ++ ++ return drv_data->transfer_handler(drv_data); ++} ++ ++static int k1_spi_map_dma_buffer(struct spi_driver_data *drv_data, enum dma_data_direction dir) ++{ ++ int i, nents, ret, len = drv_data->len; ++ struct scatterlist *sg; ++ struct device *dmadev; ++ struct sg_table *sgt; ++ void *buf, *pbuf; ++ ++ if (dir == DMA_TO_DEVICE) { ++ dmadev = drv_data->tx_chan->device->dev; ++ sgt = &drv_data->tx_sgt; ++ buf = drv_data->tx; ++ drv_data->tx_map_len = len; ++ } else { ++ dmadev = drv_data->rx_chan->device->dev; ++ sgt = &drv_data->rx_sgt; ++ buf = drv_data->rx; ++ drv_data->rx_map_len = len; ++ } ++ ++ nents = DIV_ROUND_UP(len, SZ_2K); ++ if (nents != sgt->nents) { ++ sg_free_table(sgt); ++ ret = sg_alloc_table(sgt, nents, GFP_ATOMIC); ++ if (ret) ++ return ret; ++ } ++ ++ pbuf = buf; ++ for_each_sg(sgt->sgl, sg, sgt->nents, i) { ++ size_t bytes = min_t(size_t, len, SZ_2K); ++ ++ if (buf) ++ sg_set_buf(sg, pbuf, bytes); ++ else ++ sg_set_buf(sg, drv_data->dummy, bytes); ++ ++ pbuf += bytes; ++ len -= bytes; ++ } ++ ++ nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir); ++ if (!nents) ++ return -ENOMEM; ++ ++ return nents; ++} ++ ++static void k1_spi_unmap_dma_buffer(struct spi_driver_data *drv_data, enum dma_data_direction dir) ++{ ++ struct device *dmadev; ++ struct sg_table *sgt; ++ ++ if (dir == DMA_TO_DEVICE) { ++ dmadev = drv_data->tx_chan->device->dev; ++ sgt = &drv_data->tx_sgt; ++ } else { ++ dmadev = drv_data->rx_chan->device->dev; ++ sgt = &drv_data->rx_sgt; ++ } ++ ++ dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir); ++} ++ ++static void k1_spi_unmap_dma_buffers(struct spi_driver_data *drv_data) ++{ ++ if (!drv_data->dma_mapped) ++ return; ++ ++ k1_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE); ++ k1_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE); ++ ++ drv_data->dma_mapped = 0; ++} ++ ++static void k1_spi_dma_transfer_complete(struct spi_driver_data *drv_data, bool error) ++{ ++ struct spi_message *msg = drv_data->cur_msg; ++ ++ /* ++ * It is possible that one CPU is handling ROR interrupt and other ++ * just gets DMA completion. Calling pump_transfers() twice for the ++ * same transfer leads to problems thus we prevent concurrent calls ++ * by using ->dma_running. 
++ */ ++ if (atomic_dec_and_test(&drv_data->dma_running)) { ++ /* ++ * If the other CPU is still handling the ROR interrupt we ++ * might not know about the error yet. So we re-check the ++ * ROR bit here before we clear the status register. ++ */ ++ if (!error) { ++ u32 status = k1_spi_read(drv_data, STATUS) ++ & drv_data->mask_sr; ++ error = status & STATUS_ROR; ++ } ++ ++ /* Clear status & disable interrupts */ ++ k1_spi_write(drv_data, FIFO_CTRL, ++ k1_spi_read(drv_data, FIFO_CTRL) ++ & ~drv_data->dma_fifo_ctrl); ++ k1_spi_write(drv_data, TOP_CTRL, ++ k1_spi_read(drv_data, TOP_CTRL) ++ & ~drv_data->dma_top_ctrl); ++ k1_spi_write(drv_data, STATUS, drv_data->clear_sr); ++ k1_spi_write(drv_data, TO, 0); ++ ++ if (!error) { ++ k1_spi_unmap_dma_buffers(drv_data); ++ ++ drv_data->tx += drv_data->tx_map_len; ++ drv_data->rx += drv_data->rx_map_len; ++ ++ msg->actual_length += drv_data->len; ++ msg->state = k1_spi_next_transfer(drv_data); ++ } else { ++ /* In case we got an error we disable the spi now */ ++ k1_spi_write(drv_data, TOP_CTRL, ++ k1_spi_read(drv_data, TOP_CTRL) & ~TOP_SSE); ++ ++ msg->state = ERROR_STATE; ++ } ++ queue_work(system_wq, &drv_data->pump_transfers); ++ } ++} ++ ++static void k1_spi_dma_callback(void *data) ++{ ++ k1_spi_dma_transfer_complete(data, false); ++} ++ ++static struct dma_async_tx_descriptor * ++k1_spi_dma_prepare_one(struct spi_driver_data *drv_data, enum dma_transfer_direction dir) ++{ ++ struct chip_data *chip = drv_data->cur_chip; ++ enum dma_slave_buswidth width; ++ struct dma_slave_config cfg; ++ struct dma_chan *chan; ++ struct sg_table *sgt; ++ int nents, ret; ++ ++ switch (drv_data->n_bytes) { ++ case 1: ++ width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ break; ++ case 2: ++ width = DMA_SLAVE_BUSWIDTH_2_BYTES; ++ break; ++ default: ++ width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ break; ++ } ++ ++ memset(&cfg, 0, sizeof(cfg)); ++ cfg.direction = dir; ++ ++ if (dir == DMA_MEM_TO_DEV) { ++ cfg.dst_addr = drv_data->ssdr_physical; ++ cfg.dst_addr_width = width; ++ cfg.dst_maxburst = chip->dma_burst_size; ++ ++ sgt = &drv_data->tx_sgt; ++ nents = drv_data->tx_nents; ++ chan = drv_data->tx_chan; ++ } else { ++ cfg.src_addr = drv_data->ssdr_physical; ++ cfg.src_addr_width = width; ++ cfg.src_maxburst = chip->dma_burst_size; ++ ++ sgt = &drv_data->rx_sgt; ++ nents = drv_data->rx_nents; ++ chan = drv_data->rx_chan; ++ } ++ ++ ret = dmaengine_slave_config(chan, &cfg); ++ if (ret) { ++ dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n"); ++ return NULL; ++ } ++ ++ return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++} ++ ++static bool k1_spi_dma_is_possible(size_t len) ++{ ++ return len <= MAX_DMA_LEN; ++} ++ ++static int k1_spi_map_dma_buffers(struct spi_driver_data *drv_data) ++{ ++ const struct chip_data *chip = drv_data->cur_chip; ++ int ret; ++ ++ if (!chip->enable_dma) ++ return 0; ++ ++ /* Don't bother with DMA if we can't do even a single burst */ ++ if (drv_data->len < chip->dma_burst_size) ++ return 0; ++ ++ ret = k1_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE); ++ if (ret <= 0) { ++ dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n"); ++ return 0; ++ } ++ ++ drv_data->tx_nents = ret; ++ ++ ret = k1_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE); ++ if (ret <= 0) { ++ k1_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE); ++ dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n"); ++ return 0; ++ } ++ ++ drv_data->rx_nents = ret; ++ return 1; ++} ++ ++static irqreturn_t k1_spi_dma_transfer(struct spi_driver_data 
*drv_data) ++{ ++ u32 status; ++ ++ status = k1_spi_read(drv_data, STATUS) & drv_data->mask_sr; ++ if (status & STATUS_ROR) { ++ dmaengine_terminate_all(drv_data->rx_chan); ++ dmaengine_terminate_all(drv_data->tx_chan); ++ k1_spi_dma_transfer_complete(drv_data, true); ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ ++static int k1_spi_dma_prepare(struct spi_driver_data *drv_data, u32 dma_burst) ++{ ++ struct dma_async_tx_descriptor *tx_desc, *rx_desc; ++ ++ tx_desc = k1_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV); ++ if (!tx_desc) { ++ dev_err(&drv_data->pdev->dev, ++ "failed to get DMA TX descriptor\n"); ++ return -EBUSY; ++ } ++ ++ rx_desc = k1_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM); ++ if (!rx_desc) { ++ dev_err(&drv_data->pdev->dev, ++ "failed to get DMA RX descriptor\n"); ++ return -EBUSY; ++ } ++ ++ /* We are ready when RX completes */ ++ rx_desc->callback = k1_spi_dma_callback; ++ rx_desc->callback_param = drv_data; ++ ++ dmaengine_submit(rx_desc); ++ dmaengine_submit(tx_desc); ++ return 0; ++} ++ ++static void k1_spi_dma_start(struct spi_driver_data *drv_data) ++{ ++ dma_async_issue_pending(drv_data->rx_chan); ++ dma_async_issue_pending(drv_data->tx_chan); ++ ++ atomic_set(&drv_data->dma_running, 1); ++} ++ ++static int k1_spi_dma_setup(struct spi_driver_data *drv_data) ++{ ++ struct k1_spi_master *pdata = drv_data->master_info; ++ struct device *dev = &drv_data->pdev->dev; ++ dma_cap_mask_t mask; ++ ++ dma_cap_zero(mask); ++ dma_cap_set(DMA_SLAVE, mask); ++ ++ drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL); ++ if (!drv_data->dummy) ++ return -ENOMEM; ++ ++ drv_data->tx_chan = dma_request_slave_channel_compat(mask, pdata->dma_filter, ++ pdata->tx_param, dev, "tx"); ++ if (!drv_data->tx_chan) ++ return -ENODEV; ++ ++ drv_data->rx_chan = dma_request_slave_channel_compat(mask, pdata->dma_filter, ++ pdata->rx_param, dev, "rx"); ++ if (!drv_data->rx_chan) { ++ dma_release_channel(drv_data->tx_chan); ++ drv_data->tx_chan = NULL; ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++static void k1_spi_dma_release(struct spi_driver_data *drv_data) ++{ ++ if (drv_data->rx_chan) { ++ dmaengine_terminate_all(drv_data->rx_chan); ++ dma_release_channel(drv_data->rx_chan); ++ sg_free_table(&drv_data->rx_sgt); ++ drv_data->rx_chan = NULL; ++ } ++ if (drv_data->tx_chan) { ++ dmaengine_terminate_all(drv_data->tx_chan); ++ dma_release_channel(drv_data->tx_chan); ++ sg_free_table(&drv_data->tx_sgt); ++ drv_data->tx_chan = NULL; ++ } ++} ++ ++static int k1_spi_set_dma_burst_and_threshold(struct chip_data *chip, struct spi_device *spi, ++ u8 bits_per_word, u32 *burst_code, u32 *threshold) ++{ ++ /* ++ * If the DMA burst size is given in chip_info we use ++ * that, otherwise we set it to half of FIFO size; SPI ++ * FIFO has 16 entry, so FIFO size = 16*bits_per_word/8; ++ * Also we use the default FIFO thresholds for now. 
++ */ ++ if (chip && chip->dma_burst_size) ++ *burst_code = chip->dma_burst_size; ++ else if (bits_per_word <= 8) ++ *burst_code = 8; ++ else if (bits_per_word <= 16) ++ *burst_code = 16; ++ else ++ *burst_code = 32; ++ ++ *threshold = FIFO_RxTresh(RX_THRESH_DFLT) | FIFO_TxTresh(TX_THRESH_DFLT); ++ ++ return 0; ++} ++ ++static void pump_transfers(struct work_struct *work) ++{ ++ struct spi_driver_data *drv_data = container_of(work, struct spi_driver_data, ++ pump_transfers); ++ struct spi_message *message = NULL; ++ struct spi_transfer *transfer = NULL; ++ struct spi_transfer *previous = NULL; ++ struct chip_data *chip = NULL; ++ u8 bits = 0; ++ u32 top_ctrl; ++ u32 fifo_ctrl; ++ u32 int_en = 0; ++ u32 dma_thresh = drv_data->cur_chip->dma_threshold; ++ u32 dma_burst = drv_data->cur_chip->dma_burst_size; ++ ++ /* Get current state information */ ++ message = drv_data->cur_msg; ++ transfer = drv_data->cur_transfer; ++ chip = drv_data->cur_chip; ++ ++ /* Handle for abort */ ++ if (message->state == ERROR_STATE) { ++ message->status = -EIO; ++ giveback(drv_data); ++ return; ++ } ++ ++ /* Handle end of message */ ++ if (message->state == DONE_STATE) { ++ message->status = 0; ++ giveback(drv_data); ++ return; ++ } ++ ++ /* Delay if requested at end of transfer before CS change */ ++ if (message->state == RUNNING_STATE) { ++ previous = list_entry(transfer->transfer_list.prev, ++ struct spi_transfer, transfer_list); ++ spi_transfer_delay_exec(previous); ++ ++ /* Drop chip select only if cs_change is requested */ ++ if (previous->cs_change) ++ cs_deassert(drv_data); ++ } ++ ++ /* Check if we can DMA this transfer */ ++ if (!k1_spi_dma_is_possible(transfer->len) && chip->enable_dma) { ++ /* reject already-mapped transfers; PIO won't always work */ ++ if (message->is_dma_mapped || transfer->rx_dma || transfer->tx_dma) { ++ dev_err(&drv_data->pdev->dev, ++ "%s: mapped transfer length %u is greater than %d\n", ++ __func__, transfer->len, MAX_DMA_LEN); ++ message->status = -EINVAL; ++ giveback(drv_data); ++ return; ++ } ++ ++ /* warn ... we force this to PIO mode */ ++ dev_warn_ratelimited(&message->spi->dev, ++ "%s: DMA disabled for transfer length %ld greater than %d\n", ++ __func__, (long)drv_data->len, MAX_DMA_LEN); ++ } ++ ++ /* Setup the transfer state based on the type of transfer */ ++ if (k1_spi_flush(drv_data) == 0) { ++ dev_err(&drv_data->pdev->dev, "%s: flush failed\n", __func__); ++ message->status = -EIO; ++ giveback(drv_data); ++ return; ++ } ++ drv_data->n_bytes = chip->n_bytes; ++ drv_data->tx = (void *)transfer->tx_buf; ++ drv_data->tx_end = drv_data->tx + transfer->len; ++ drv_data->rx = transfer->rx_buf; ++ drv_data->rx_end = drv_data->rx + transfer->len; ++ drv_data->rx_dma = transfer->rx_dma; ++ drv_data->tx_dma = transfer->tx_dma; ++ drv_data->len = transfer->len; ++ drv_data->write = drv_data->tx ? chip->write : null_writer; ++ drv_data->read = drv_data->rx ? chip->read : null_reader; ++ ++ /* Change speed and bit per word on a per transfer */ ++ bits = transfer->bits_per_word; ++ ++ if (bits <= 8) { ++ drv_data->n_bytes = 1; ++ drv_data->read = drv_data->read != null_reader ? u8_reader : null_reader; ++ drv_data->write = drv_data->write != null_writer ? u8_writer : null_writer; ++ } else if (bits <= 16) { ++ drv_data->n_bytes = 2; ++ drv_data->read = drv_data->read != null_reader ? u16_reader : null_reader; ++ drv_data->write = drv_data->write != null_writer ? 
u16_writer : null_writer; ++ } else if (bits <= 32) { ++ drv_data->n_bytes = 4; ++ drv_data->read = drv_data->read != null_reader ? u32_reader : null_reader; ++ drv_data->write = drv_data->write != null_writer ? u32_writer : null_writer; ++ } ++ /* ++ * if bits/word is changed in dma mode, then must check the ++ * thresholds and burst also ++ */ ++ if (chip->enable_dma) { ++ if (k1_spi_set_dma_burst_and_threshold(chip, message->spi, bits, ++ &dma_burst, &dma_thresh)) ++ dev_warn_ratelimited(&message->spi->dev, ++ "%s: DMA burst size reduced to match bits_per_word\n", ++ __func__); ++ } ++ ++ top_ctrl = k1_configure_topctrl(drv_data, bits); ++ dev_dbg(&message->spi->dev, "%u Hz, %s\n", drv_data->master->max_speed_hz, ++ chip->enable_dma ? "DMA" : "PIO"); ++ top_ctrl |= chip->top_ctrl; ++ fifo_ctrl = chip->fifo_ctrl; ++ ++ if (drv_data->spi_enhancement) { ++ /* ++ * If transfer length is times of 4, then use ++ * 32 bit fifo width with endian swap support ++ */ ++ if (drv_data->len % 4 == 0 && transfer->bits_per_word <= 16) { ++ if (transfer->bits_per_word <= 8) ++ fifo_ctrl |= FIFO_WR_ENDIAN_8BITS | FIFO_RD_ENDIAN_8BITS; ++ else if (transfer->bits_per_word <= 16) ++ fifo_ctrl |= FIFO_WR_ENDIAN_16BITS | FIFO_RD_ENDIAN_16BITS; ++ bits = 32; ++ drv_data->n_bytes = 4; ++ if (transfer->rx_buf) ++ drv_data->read = u32_reader; ++ if (transfer->tx_buf) ++ drv_data->write = u32_writer; ++ ++ if (chip->enable_dma) { ++ if (k1_spi_set_dma_burst_and_threshold(chip, message->spi, ++ bits, &dma_burst, &dma_thresh)) ++ dev_warn_ratelimited(&message->spi->dev, ++ "%s: DMA set burst size to match bits_per_word\n", ++ __func__); ++ } ++ ++ top_ctrl &= ~TOP_DSS_MASK; ++ top_ctrl |= TOP_DSS(32); ++ } ++ } ++ ++ message->state = RUNNING_STATE; ++ ++ drv_data->dma_mapped = 0; ++ if (k1_spi_dma_is_possible(drv_data->len)) ++ drv_data->dma_mapped = k1_spi_map_dma_buffers(drv_data); ++ if (drv_data->dma_mapped) { ++ /* Ensure we have the correct interrupt handler */ ++ drv_data->transfer_handler = k1_spi_dma_transfer; ++ ++ k1_spi_dma_prepare(drv_data, dma_burst); ++ ++ /* Clear status and start DMA engine */ ++ fifo_ctrl |= chip->fifo_ctrl | dma_thresh | drv_data->dma_fifo_ctrl; ++ top_ctrl |= chip->top_ctrl | drv_data->dma_top_ctrl; ++ k1_spi_write(drv_data, STATUS, drv_data->clear_sr); ++ k1_spi_dma_start(drv_data); ++ int_en = k1_spi_read(drv_data, INT_EN); ++ } else { ++ /* Ensure we have the correct interrupt handler */ ++ drv_data->transfer_handler = interrupt_transfer; ++ ++ fifo_ctrl = fifo_ctrl | chip->fifo_ctrl | chip->threshold; ++ int_en = k1_spi_read(drv_data, INT_EN) | drv_data->int_cr; ++ k1_spi_write(drv_data, STATUS, drv_data->clear_sr); ++ } ++ ++ k1_spi_write(drv_data, TO, chip->timeout); ++ ++ cs_assert(drv_data); ++ ++ top_ctrl |= TOP_HOLD_FRAME_LOW; ++ top_ctrl &= ~TOP_SSE; ++ k1_spi_write(drv_data, TOP_CTRL, top_ctrl); ++ k1_spi_write(drv_data, FIFO_CTRL, fifo_ctrl); ++ k1_spi_write(drv_data, INT_EN, int_en); ++ top_ctrl |= TOP_SSE; ++ k1_spi_write(drv_data, TOP_CTRL, top_ctrl); ++} ++ ++static int k1_spi_transfer_one_message(struct spi_master *master, struct spi_message *msg) ++{ ++ struct spi_driver_data *drv_data = spi_master_get_devdata(master); ++ ++ drv_data->cur_msg = msg; ++ /* Initial message state*/ ++ drv_data->cur_msg->state = START_STATE; ++ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, ++ struct spi_transfer, transfer_list); ++ ++ /* ++ * prepare to setup the spi, in pump_transfers, using the per ++ * chip configuration ++ */ ++ drv_data->cur_chip = 
spi_get_ctldata(drv_data->cur_msg->spi); ++ ++ if ((drv_data->cur_transfer->speed_hz) && ++ (drv_data->cur_transfer->speed_hz != clk_get_rate(drv_data->clk))) { ++ clk_set_rate(drv_data->clk, drv_data->cur_transfer->speed_hz); ++ } ++ ++ reinit_completion(&drv_data->cur_msg_completion); ++ /* Mark as busy and launch transfers */ ++ queue_work(system_wq, &drv_data->pump_transfers); ++ wait_for_completion(&drv_data->cur_msg_completion); ++ ++ return 0; ++} ++ ++static int k1_spi_unprepare_transfer(struct spi_master *master) ++{ ++ struct spi_driver_data *drv_data = spi_master_get_devdata(master); ++ ++ /* Disable the spi now */ ++ k1_spi_write(drv_data, TOP_CTRL, k1_spi_read(drv_data, TOP_CTRL) & ++ ~(TOP_SSE | TOP_HOLD_FRAME_LOW)); ++ ++ return 0; ++} ++ ++static int setup(struct spi_device *spi) ++{ ++ struct chip_data *chip; ++ struct spi_driver_data *drv_data = spi_master_get_devdata(spi->master); ++ uint tx_thres, tx_hi_thres, rx_thres; ++ ++ tx_thres = TX_THRESH_DFLT; ++ tx_hi_thres = 0; ++ rx_thres = RX_THRESH_DFLT; ++ ++ /* Only alloc on first setup */ ++ chip = spi_get_ctldata(spi); ++ if (!chip) { ++ chip = devm_kzalloc(&spi->master->dev, sizeof(struct chip_data), GFP_KERNEL); ++ if (!chip) ++ return -ENOMEM; ++ ++ chip->gpio_cs = -1; ++ chip->enable_dma = 0; ++ chip->timeout = TIMOUT_DFLT; ++ } ++ ++ chip->top_ctrl = 0; ++ chip->fifo_ctrl = 0; ++ ++ chip->enable_dma = drv_data->master_info->enable_dma; ++ if (chip->enable_dma) { ++ /* set up legal burst and threshold for dma */ ++ if (k1_spi_set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, ++ &chip->dma_burst_size, ++ &chip->dma_threshold)) { ++ dev_warn(&spi->dev, "DMA burst size reduced to match bits_per_word\n"); ++ } ++ } ++ chip->threshold = (FIFO_RxTresh(rx_thres) & FIFO_RFT) | ++ (FIFO_TxTresh(tx_thres) & FIFO_TFT); ++ ++ chip->top_ctrl &= ~(TOP_SPO | TOP_SPH); ++ chip->top_ctrl |= (((spi->mode & SPI_CPHA) != 0) ? TOP_SPH : 0) | ++ (((spi->mode & SPI_CPOL) != 0) ? 
TOP_SPO : 0); ++ ++ if (spi->mode & SPI_LOOP) ++ chip->top_ctrl |= TOP_LBM; ++ ++ /* Enable rx fifo auto full control */ ++ if (drv_data->spi_enhancement) ++ chip->fifo_ctrl |= FIFO_RXFIFO_AUTO_FULL_CTRL; ++ ++ if (spi->bits_per_word <= 8) { ++ chip->n_bytes = 1; ++ chip->read = u8_reader; ++ chip->write = u8_writer; ++ } else if (spi->bits_per_word <= 16) { ++ chip->n_bytes = 2; ++ chip->read = u16_reader; ++ chip->write = u16_writer; ++ } else if (spi->bits_per_word <= 32) { ++ chip->n_bytes = 4; ++ chip->read = u32_reader; ++ chip->write = u32_writer; ++ } ++ ++ if (clk_get_rate(drv_data->clk) != spi->max_speed_hz) ++ clk_set_rate(drv_data->clk, spi->max_speed_hz); ++ ++ spi_set_ctldata(spi, chip); ++ ++ return 0; ++} ++ ++static void cleanup(struct spi_device *spi) ++{ ++ struct chip_data *chip = spi_get_ctldata(spi); ++ ++ if (!chip) ++ return; ++ ++ if (gpio_is_valid(chip->gpio_cs)) ++ gpio_free(chip->gpio_cs); ++ ++ devm_kfree(&spi->dev, chip); ++} ++ ++static const struct of_device_id k1_spi_dt_ids[] = { ++ { .compatible = "spacemit,k1-spi", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, k1_spi_dt_ids); ++ ++static int k1_spi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct k1_spi_master *platform_info; ++ struct spi_master *master = NULL; ++ struct spi_driver_data *drv_data = NULL; ++ struct device_node *np = dev->of_node; ++ struct resource *iores; ++ u32 bus_num, tmp; ++ int status; ++ ++ platform_info = dev_get_platdata(dev); ++ if (!platform_info) { ++ platform_info = devm_kzalloc(dev, sizeof(*platform_info), GFP_KERNEL); ++ if (!platform_info) ++ return -ENOMEM; ++ platform_info->num_chipselect = 1; ++ /* TODO: NO DMA on FPGA yet */ ++ if (of_get_property(np, "k1,spi-disable-dma", NULL)) ++ platform_info->enable_dma = 0; ++ else ++ platform_info->enable_dma = 1; ++ } ++ ++ master = spi_alloc_master(dev, sizeof(struct spi_driver_data)); ++ if (!master) { ++ dev_err(&pdev->dev, "cannot alloc spi_master\n"); ++ return -ENOMEM; ++ } ++ drv_data = spi_master_get_devdata(master); ++ ++ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (iores == NULL) { ++ dev_err(dev, "no memory resource defined\n"); ++ status = -ENODEV; ++ goto out_error_master_alloc; ++ } ++ ++ drv_data->ioaddr = devm_ioremap_resource(dev, iores); ++ if (drv_data->ioaddr == NULL) { ++ dev_err(dev, "failed to ioremap() registers\n"); ++ status = -ENODEV; ++ goto out_error_master_alloc; ++ } ++ ++ drv_data->irq = platform_get_irq(pdev, 0); ++ if (drv_data->irq < 0) { ++ dev_err(dev, "no IRQ resource defined\n"); ++ status = -ENODEV; ++ goto out_error_master_alloc; ++ } ++ ++ /* Receive FIFO auto full ctrl enable */ ++ if (of_get_property(np, "k1,spi-enhancement", NULL)) ++ drv_data->spi_enhancement = 1; ++ ++ master->dev.of_node = dev->of_node; ++ if (!of_property_read_u32(np, "k1,spi-id", &bus_num)) ++ master->bus_num = bus_num; ++ drv_data->ssdr_physical = iores->start + DATAR; ++ ++ drv_data->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR_OR_NULL(drv_data->clk)) { ++ dev_err(&pdev->dev, "cannot get clk\n"); ++ status = -ENODEV; ++ goto out_error_clk_check; ++ } ++ ++ drv_data->reset = devm_reset_control_get_optional(dev, NULL); ++ if (IS_ERR_OR_NULL(drv_data->reset)) { ++ dev_err(&pdev->dev, "Failed to get spi's reset\n"); ++ status = -ENODEV; ++ goto out_error_clk_check; ++ } ++ ++ drv_data->master = master; ++ drv_data->master_info = platform_info; ++ drv_data->pdev = pdev; ++ ++ master->dev.parent = &pdev->dev; ++ /* the spi->mode bits understood by this driver: */ ++ 
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; ++ ++ master->dma_alignment = DMA_ALIGNMENT; ++ master->cleanup = cleanup; ++ master->setup = setup; ++ master->transfer_one_message = k1_spi_transfer_one_message; ++ master->unprepare_transfer_hardware = k1_spi_unprepare_transfer; ++ master->auto_runtime_pm = true; ++ ++ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); ++ drv_data->int_cr = INT_EN_TIE | INT_EN_RIE | INT_EN_TINTE; ++ drv_data->clear_sr = STATUS_ROR | STATUS_TINT; ++ drv_data->mask_sr = STATUS_TINT | STATUS_RFS | STATUS_TFS | STATUS_ROR; ++ drv_data->dma_top_ctrl = DEFAULT_DMA_TOP_CTRL; ++ drv_data->dma_fifo_ctrl = DEFAULT_DMA_FIFO_CTRL; ++ ++ status = devm_request_irq(&pdev->dev, drv_data->irq, spi_int, IRQF_SHARED, ++ dev_name(dev), drv_data); ++ if (status < 0) { ++ dev_err(&pdev->dev, "cannot get IRQ %d\n", drv_data->irq); ++ goto out_error_master_alloc; ++ } ++ ++ /* Setup DMA if requested */ ++ if (platform_info->enable_dma) { ++ status = k1_spi_dma_setup(drv_data); ++ if (status) { ++ dev_dbg(dev, "no DMA channels available, using PIO\n"); ++ platform_info->enable_dma = false; ++ } ++ } ++ ++ status = of_property_read_u32(np, "k1,spi-clock-rate", &master->max_speed_hz); ++ if (status < 0) { ++ dev_err(&pdev->dev, "cannot get clock-rate from DT file\n"); ++ goto out_error_master_alloc; ++ } ++ ++ clk_set_rate(drv_data->clk, master->max_speed_hz); ++ master->max_speed_hz = clk_get_rate(drv_data->clk); ++ clk_prepare_enable(drv_data->clk); ++ reset_control_deassert(drv_data->reset); ++ ++ /* Load default spi controller configuration */ ++ k1_spi_write(drv_data, TOP_CTRL, 0); ++ k1_spi_write(drv_data, FIFO_CTRL, 0); ++ tmp = FIFO_RxTresh(RX_THRESH_DFLT) | FIFO_TxTresh(TX_THRESH_DFLT); ++ k1_spi_write(drv_data, FIFO_CTRL, tmp); ++ tmp = TOP_FRF_Motorola | TOP_DSS(8); ++ k1_spi_write(drv_data, TOP_CTRL, tmp); ++ k1_spi_write(drv_data, TO, 0); ++ ++ k1_spi_write(drv_data, PSP_CTRL, 0); ++ ++ master->num_chipselect = platform_info->num_chipselect; ++ ++ INIT_WORK(&drv_data->pump_transfers, pump_transfers); ++ pm_runtime_set_autosuspend_delay(&pdev->dev, 50); ++ pm_runtime_use_autosuspend(&pdev->dev); ++ pm_runtime_set_active(&pdev->dev); ++ pm_runtime_enable(&pdev->dev); ++ ++ init_completion(&drv_data->cur_msg_completion); ++ ++ /* Register with the SPI framework */ ++ platform_set_drvdata(pdev, drv_data); ++ status = devm_spi_register_master(&pdev->dev, master); ++ if (status != 0) { ++ dev_err(&pdev->dev, "problem registering spi master\n"); ++ goto out_error_clock_enabled; ++ } ++ ++ return status; ++ ++out_error_clock_enabled: ++ reset_control_assert(drv_data->reset); ++ clk_disable_unprepare(drv_data->clk); ++ k1_spi_dma_release(drv_data); ++ free_irq(drv_data->irq, drv_data); ++out_error_clk_check: ++out_error_master_alloc: ++ spi_master_put(master); ++ return status; ++} ++ ++static int k1_spi_remove(struct platform_device *pdev) ++{ ++ struct spi_driver_data *drv_data = platform_get_drvdata(pdev); ++ ++ if (!drv_data) ++ return 0; ++ ++ pm_runtime_get_sync(&pdev->dev); ++ ++ /* Disable the spi at the peripheral and SOC level */ ++ k1_spi_write(drv_data, TOP_CTRL, 0); ++ k1_spi_write(drv_data, FIFO_CTRL, 0); ++ ++ reset_control_assert(drv_data->reset); ++ clk_disable_unprepare(drv_data->clk); ++ ++ /* Release DMA */ ++ if (drv_data->master_info->enable_dma) ++ k1_spi_dma_release(drv_data); ++ ++ pm_runtime_put_noidle(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); ++ ++ /* Release IRQ */ ++ free_irq(drv_data->irq, drv_data); ++ ++ return 0; ++} 
++ ++static void k1_spi_shutdown(struct platform_device *pdev) ++{ ++ int status = k1_spi_remove(pdev); ++ ++ if (status != 0) ++ dev_err(&pdev->dev, "shutdown failed with %d\n", status); ++} ++ ++static struct platform_driver driver = { ++ .driver = { ++ .name = "k1-spi", ++ .of_match_table = k1_spi_dt_ids, ++ }, ++ .probe = k1_spi_probe, ++ .remove = k1_spi_remove, ++ .shutdown = k1_spi_shutdown, ++}; ++ ++static int __init k1_spi_init(void) ++{ ++ return platform_driver_register(&driver); ++} ++module_init(k1_spi_init); ++ ++static void __exit k1_spi_exit(void) ++{ ++ platform_driver_unregister(&driver); ++} ++module_exit(k1_spi_exit); ++ ++MODULE_DESCRIPTION("Spacemit k1 spi controller driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/spi/spi-spacemit-k1.h b/drivers/spi/spi-spacemit-k1.h +new file mode 100644 +index 000000000000..01cb397b8c05 +--- /dev/null ++++ b/drivers/spi/spi-spacemit-k1.h +@@ -0,0 +1,281 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Support for Spacemit k1 spi controller ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++ ++#ifndef _SPI_SPACEMIT_K1_H ++#define _SPI_SPACEMIT_K1_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Spacemit k1 SPI Registers */ ++#define TOP_CTRL 0x00 /* SPI Top Control Register */ ++#define FIFO_CTRL 0x04 /* SPI FIFO Control Register */ ++#define INT_EN 0x08 /* SPI Interrupt Enable Register */ ++#define TO 0x0C /* SPI Time Out Register */ ++#define DATAR 0x10 /* SPI Data Register */ ++#define STATUS 0x14 /* SPI Stauts Register */ ++/* SPI Programmable Serial Protocal Control Register */ ++#define PSP_CTRL 0x18 ++#define NET_WORK_CTRL 0x1C /* SPI NET Work Control Register */ ++#define NET_WORK_STATUS 0x20 /* SPI Net Work Status Register */ ++#define RWOT_CTRL 0x24 /* SPI RWOT Control Register */ ++#define RWOT_CCM 0x28 /* SPI RWOT Counter Cycles Match Register */ ++/* SPI RWOT Counter Value Write for Read Request Register */ ++#define RWOT_CVWRn 0x2C ++ ++/* 0x00 TOP_CTRL */ ++#define TOP_TTELP (1 << 18) ++#define TOP_TTE (1 << 17) ++#define TOP_SCFR (1 << 16) ++#define TOP_IFS (1 << 15) ++#define TOP_HOLD_FRAME_LOW (1 << 14) ++#define TOP_TRAIL (1 << 13) ++#define TOP_LBM (1 << 12) ++#define TOP_SPH (1 << 11) ++#define TOP_SPO (1 << 10) ++#define TOP_DSS(x) ((x - 1) << 5) ++#define TOP_DSS_MASK (0x1F << 5) ++#define TOP_SFRMDIR (1 << 4) ++#define TOP_SCLKDIR (1 << 3) ++#define TOP_FRF_MASK (0x3 << 1) ++/* Motorola's Serial Peripheral Interface (SPI) */ ++#define TOP_FRF_Motorola (0x0 << 1) ++/* Texas Instruments' Synchronous Serial Protocol (SSP) */ ++#define TOP_FRF_TI (0x1 << 1) ++#define TOP_FRF_National (0x2 << 1) /* National Microwire */ ++#define TOP_FRF_PSP (0x3 << 1) /* Programmable Serial Protocol(PSP) */ ++#define TOP_SSE (1 << 0) ++ ++/* 0x04 FIFO_CTRL */ ++#define FIFO_STRF (1 << 19) ++#define FIFO_EFWR (1 << 18) ++#define FIFO_RXFIFO_AUTO_FULL_CTRL (1 << 17) ++#define FIFO_FPCKE (1 << 16) ++#define FIFO_TXFIFO_WR_ENDIAN_MASK (0x3 << 14) ++#define FIFO_RXFIFO_RD_ENDIAN_MASK (0x3 << 12) ++#define FIFO_WR_ENDIAN_16BITS (1 << 14) /* Swap first 16 bits and last 16 bits */ ++#define FIFO_WR_ENDIAN_8BITS (2 << 14) /* Swap all 4 bytes */ ++#define FIFO_RD_ENDIAN_16BITS (1 << 12) /* Swap first 16 bits and last 16 bits */ ++#define FIFO_RD_ENDIAN_8BITS (2 << 12) /* Swap all 4 bytes */ ++#define FIFO_RSRE (1 << 11) ++#define FIFO_TSRE (1 << 10) ++ ++/* 0x08 INT_EN */ ++#define INT_EN_EBCEI 
(1 << 6) ++#define INT_EN_TIM (1 << 5) ++#define INT_EN_RIM (1 << 4) ++#define INT_EN_TIE (1 << 3) ++#define INT_EN_RIE (1 << 2) ++#define INT_EN_TINTE (1 << 1) ++#define INT_EN_PINTE (1 << 0) ++ ++/* 0x0C TO */ ++#define TIMEOUT(x) ((x) << 0) ++ ++/* 0x10 DATAR */ ++#define DATA(x) ((x) << 0) ++ ++/* 0x14 STATUS */ ++#define STATUS_OSS (1 << 23) ++#define STATUS_TX_OSS (1 << 22) ++#define STATUS_BCE (1 << 21) ++#define STATUS_ROR (1 << 20) ++#define STATUS_RNE (1 << 14) ++#define STATUS_RFS (1 << 13) ++#define STATUS_TUR (1 << 12) ++#define STATUS_TNF (1 << 6) ++#define STATUS_TFS (1 << 5) ++#define STATUS_EOC (1 << 4) ++#define STATUS_TINT (1 << 3) ++#define STATUS_PINT (1 << 2) ++#define STATUS_CSS (1 << 1) ++#define STATUS_BSY (1 << 0) ++ ++/* 0x18 PSP_CTRL */ ++#define PSP_EDMYSTOP(x) ((x) << 27) ++#define PSP_EMYSTOP(x) ((x) << 25) ++#define PSP_EDMYSTRT(x) ((x) << 23) ++#define PSP_DMYSTRT(x) ((x) << 21) ++#define PSP_STRTDLY(x) ((x) << 18) ++#define PSP_SFRMWDTH(x) ((x) << 12) ++#define PSP_SFRMDLY(x) ((x) << 5) ++#define PSP_SFRMP (1 << 4) ++#define PSP_FSRT (1 << 3) ++#define PSP_ETDS (1 << 2) ++#define PSP_SCMODE(x) ((x) << 0) ++ ++/* 0x1C NET_WORK_CTRL */ ++#define RTSA(x) ((x) << 12) ++#define RTSA_MASK (0xFF << 12) ++#define TTSA(x) ((x) << 4) ++#define TTSA_MASK (0xFF << 4) ++#define NET_FRDC(x) ((x) << 1) ++#define NET_WORK_MODE (1 << 0) ++ ++/* 0x20 NET_WORK_STATUS */ ++#define NET_SATUS_NMBSY (1 << 3) ++#define NET_STATUS_TSS(x) ((x) << 0) ++ ++/* 0x24 RWOT_CTRL */ ++#define RWOT_MASK_RWOT_LAST_SAMPLE (1 << 4) ++#define RWOT_CLR_RWOT_CYCLE (1 << 3) ++#define RWOT_SET_RWOT_CYCLE (1 << 2) ++#define RWOT_CYCLE_RWOT_EN (1 << 1) ++#define RWOT_RWOT (1 << 0) ++ ++struct spi_driver_data { ++ /* Driver model hookup */ ++ struct platform_device *pdev; ++ ++ /* SPI framework hookup */ ++ struct spi_master *master; ++ ++ /* k1 hookup */ ++ struct k1_spi_master *master_info; ++ ++ /* spi register addresses */ ++ void __iomem *ioaddr; ++ u32 ssdr_physical; ++ ++ /* spi masks*/ ++ u32 dma_fifo_ctrl; ++ u32 dma_top_ctrl; ++ u32 int_cr; ++ u32 clear_sr; ++ u32 mask_sr; ++ ++ /* Message Transfer pump */ ++ struct work_struct pump_transfers; ++ ++ /* DMA engine support */ ++ struct dma_chan *rx_chan; ++ struct dma_chan *tx_chan; ++ struct sg_table rx_sgt; ++ struct sg_table tx_sgt; ++ int rx_nents; ++ int tx_nents; ++ void *dummy; ++ atomic_t dma_running; ++ ++ /* Current message transfer state info */ ++ struct spi_message *cur_msg; ++ struct spi_transfer *cur_transfer; ++ struct chip_data *cur_chip; ++ struct completion cur_msg_completion; ++ size_t len; ++ void *tx; ++ void *tx_end; ++ void *rx; ++ void *rx_end; ++ int dma_mapped; ++ dma_addr_t rx_dma; ++ dma_addr_t tx_dma; ++ size_t rx_map_len; ++ size_t tx_map_len; ++ u8 n_bytes; ++ int (*write)(struct spi_driver_data *drv_data); ++ int (*read)(struct spi_driver_data *drv_data); ++ irqreturn_t (*transfer_handler)(struct spi_driver_data *drv_data); ++ void (*cs_control)(u32 command); ++ struct freq_qos_request qos_idle; ++ int qos_idle_value; ++ struct clk *clk; ++ struct reset_control *reset; ++ int irq; ++ unsigned int spi_enhancement; ++ unsigned char slave_mode; ++ struct timer_list slave_rx_timer; ++}; ++ ++struct chip_data { ++ u32 top_ctrl; ++ u32 fifo_ctrl; ++ u32 timeout; ++ u8 n_bytes; ++ u32 dma_burst_size; ++ u32 threshold; ++ u32 dma_threshold; ++ u8 enable_dma; ++ union { ++ int gpio_cs; ++ unsigned int frm; ++ }; ++ int gpio_cs_inverted; ++ int (*write)(struct spi_driver_data *drv_data); ++ int (*read)(struct 
spi_driver_data *drv_data); ++ void (*cs_control)(u32 command); ++}; ++ ++static inline u32 k1_spi_read(const struct spi_driver_data *drv_data, unsigned int reg) ++{ ++ return __raw_readl(drv_data->ioaddr + reg); ++} ++ ++static inline void k1_spi_write(const struct spi_driver_data *drv_data, unsigned int reg, u32 val) ++{ ++ __raw_writel(val, drv_data->ioaddr + reg); ++} ++ ++#define START_STATE ((void *)0) ++#define RUNNING_STATE ((void *)1) ++#define DONE_STATE ((void *)2) ++#define ERROR_STATE ((void *)-1) ++ ++#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) ++#define DMA_ALIGNMENT 64 ++ ++extern int k1_spi_flush(struct spi_driver_data *drv_data); ++extern void *k1_spi_next_transfer(struct spi_driver_data *drv_data); ++ ++/* ++ * Select the right DMA implementation. ++ */ ++#define MAX_DMA_LEN SZ_512K ++#define DEFAULT_DMA_FIFO_CTRL (FIFO_TSRE | FIFO_RSRE) ++#define DEFAULT_DMA_TOP_CTRL (TOP_TRAIL) ++ ++#define RX_THRESH_DFLT 9 ++#define TX_THRESH_DFLT 8 ++/* 0x14 */ ++#define STATUS_TFL_MASK (0x1f << 7) /* Transmit FIFO Level mask */ ++#define STATUS_RFL_MASK (0x1f << 15) /* Receive FIFO Level mask */ ++/* 0x4 */ ++#define FIFO_TFT (0x0000001F) /* Transmit FIFO Threshold (mask) */ ++#define FIFO_TxTresh(x) (((x) - 1) << 0) /* level [1..32] */ ++#define FIFO_RFT (0x000003E0) /* Receive FIFO Threshold (mask) */ ++#define FIFO_RxTresh(x) (((x) - 1) << 5) /* level [1..32] */ ++ ++#define K1_SPI_CS_ASSERT (0x01) ++#define K1_SPI_CS_DEASSERT (0x02) ++ ++struct dma_chan; ++ ++/* device.platform_data for spi controller devices */ ++struct k1_spi_master { ++ u16 num_chipselect; ++ u8 enable_dma; ++ ++ /* DMA engine specific config */ ++ bool (*dma_filter)(struct dma_chan *chan, void *param); ++ void *tx_param; ++ void *rx_param; ++}; ++ ++#endif /* _SPI_SPACEMIT_K1_H */ diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 16bb4fc3a4ba..1ec345617508 100644 --- a/drivers/spi/spidev.c @@ -559454,10 +609020,10 @@ index d5b28fd35d66..9eb9da3291cc 100644 pr_warn("invalid \"method\" property: %s\n", method); return ERR_PTR(-EINVAL); diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c -index 7fa66501792d..57900b189e99 100644 +index 7f23037813bc..eaa808e16861 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c -@@ -205,66 +205,68 @@ int serial8250_request_dma(struct uart_8250_port *p) +@@ -221,66 +221,68 @@ int serial8250_request_dma(struct uart_8250_port *p) dma->rxchan = dma_request_slave_channel_compat(mask, dma->fn, dma->rx_param, p->port.dev, "rx"); @@ -559576,7 +609142,7 @@ index 7fa66501792d..57900b189e99 100644 return 0; err: dma_release_channel(dma->txchan); -@@ -281,21 +283,23 @@ void serial8250_release_dma(struct uart_8250_port *p) +@@ -297,21 +299,23 @@ void serial8250_release_dma(struct uart_8250_port *p) if (!dma) return; @@ -559616,10 +609182,63 @@ index 7fa66501792d..57900b189e99 100644 } EXPORT_SYMBOL_GPL(serial8250_release_dma); diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c -index 72f9aab75ab1..65a9865eb6cb 100644 +index 72f9aab75ab1..7909eeef9edc 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c -@@ -262,7 +262,7 @@ static int dw8250_handle_irq(struct uart_port *p) +@@ -9,7 +9,6 @@ + * LCR is written whilst busy. If it is, then a busy detect interrupt is + * raised, the LCR needs to be rewritten and the uart status register read. 
+ */ +-#include + #include + #include + #include +@@ -17,7 +16,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -56,6 +54,36 @@ + #define DW_UART_QUIRK_ARMADA_38X BIT(1) + #define DW_UART_QUIRK_SKIP_SET_RATE BIT(2) + #define DW_UART_QUIRK_IS_DMA_FC BIT(3) ++#define DW_UART_QUIRK_APMC0D08 BIT(4) ++#define DW_UART_QUIRK_CPR_VALUE BIT(5) ++#define DW_UART_QUIRK_FIXED_TYPE BIT(6) ++ ++struct dw8250_platform_data { ++ u8 usr_reg; ++ u32 cpr_value; ++ unsigned int quirks; ++}; ++ ++struct dw8250_data { ++ struct dw8250_port_data data; ++ const struct dw8250_platform_data *pdata; ++ ++ int msr_mask_on; ++ int msr_mask_off; ++ struct clk *clk; ++ struct clk *pclk; ++ struct notifier_block clk_notifier; ++ struct work_struct clk_work; ++ struct reset_control *rst; ++ ++ unsigned int skip_autocfg:1; ++ unsigned int uart_16550_compatible:1; ++}; ++ ++static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) ++{ ++ return container_of(data, struct dw8250_data, data); ++} + + static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb) + { +@@ -262,7 +290,7 @@ static int dw8250_handle_irq(struct uart_port *p) * This problem has only been observed so far when not in DMA mode * so we limit the workaround only to non-DMA mode. */ @@ -559628,8 +609247,314 @@ index 72f9aab75ab1..65a9865eb6cb 100644 spin_lock_irqsave(&p->lock, flags); status = serial_lsr_in(up); +@@ -445,44 +473,38 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p) + + static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) + { +- struct device_node *np = p->dev->of_node; ++ unsigned int quirks = data->pdata ? data->pdata->quirks : 0; ++ u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0; + +- if (np) { +- unsigned int quirks = data->pdata->quirks; +- int id; ++ if (quirks & DW_UART_QUIRK_CPR_VALUE) ++ data->data.cpr_value = cpr_value; + +- /* get index of serial line, if found in DT aliases */ +- id = of_alias_get_id(np, "serial"); +- if (id >= 0) +- p->line = id; + #ifdef CONFIG_64BIT +- if (quirks & DW_UART_QUIRK_OCTEON) { +- p->serial_in = dw8250_serial_inq; +- p->serial_out = dw8250_serial_outq; +- p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; +- p->type = PORT_OCTEON; +- data->skip_autocfg = true; +- } ++ if (quirks & DW_UART_QUIRK_OCTEON) { ++ p->serial_in = dw8250_serial_inq; ++ p->serial_out = dw8250_serial_outq; ++ p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; ++ p->type = PORT_OCTEON; ++ data->skip_autocfg = true; ++ } + #endif + +- if (of_device_is_big_endian(np)) { +- p->iotype = UPIO_MEM32BE; +- p->serial_in = dw8250_serial_in32be; +- p->serial_out = dw8250_serial_out32be; +- } +- +- if (quirks & DW_UART_QUIRK_ARMADA_38X) +- p->serial_out = dw8250_serial_out38x; +- if (quirks & DW_UART_QUIRK_SKIP_SET_RATE) +- p->set_termios = dw8250_do_set_termios; +- if (quirks & DW_UART_QUIRK_IS_DMA_FC) { +- data->data.dma.txconf.device_fc = 1; +- data->data.dma.rxconf.device_fc = 1; +- data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma; +- data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma; +- } +- +- } else if (acpi_dev_present("APMC0D08", NULL, -1)) { ++ if (quirks & DW_UART_QUIRK_ARMADA_38X) ++ p->serial_out = dw8250_serial_out38x; ++ if (quirks & DW_UART_QUIRK_SKIP_SET_RATE) ++ p->set_termios = dw8250_do_set_termios; ++ if (quirks & DW_UART_QUIRK_IS_DMA_FC) { ++ data->data.dma.txconf.device_fc = 1; ++ data->data.dma.rxconf.device_fc = 1; ++ data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma; 
++ data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma; ++ } ++ if (quirks & DW_UART_QUIRK_FIXED_TYPE) { ++ p->flags |= UPF_FIXED_TYPE; ++ p->type = PORT_16550A; ++ data->skip_autocfg = true; ++ } ++ if (quirks & DW_UART_QUIRK_APMC0D08) { + p->iotype = UPIO_MEM32; + p->regshift = 2; + p->serial_in = dw8250_serial_in32; +@@ -515,39 +537,21 @@ static int dw8250_probe(struct platform_device *pdev) + struct device *dev = &pdev->dev; + struct dw8250_data *data; + struct resource *regs; +- int irq; + int err; +- u32 val; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return dev_err_probe(dev, -EINVAL, "no registers defined\n"); + +- irq = platform_get_irq_optional(pdev, 0); +- /* no interrupt -> fall back to polling */ +- if (irq == -ENXIO) +- irq = 0; +- if (irq < 0) +- return irq; +- + spin_lock_init(&p->lock); +- p->mapbase = regs->start; +- p->irq = irq; + p->handle_irq = dw8250_handle_irq; + p->pm = dw8250_do_pm; + p->type = PORT_8250; +- p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; ++ p->flags = UPF_FIXED_PORT; + p->dev = dev; +- p->iotype = UPIO_MEM; +- p->serial_in = dw8250_serial_in; +- p->serial_out = dw8250_serial_out; + p->set_ldisc = dw8250_set_ldisc; + p->set_termios = dw8250_set_termios; + +- p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); +- if (!p->membase) +- return -ENOMEM; +- + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; +@@ -559,15 +563,35 @@ static int dw8250_probe(struct platform_device *pdev) + data->uart_16550_compatible = device_property_read_bool(dev, + "snps,uart-16550-compatible"); + +- err = device_property_read_u32(dev, "reg-shift", &val); +- if (!err) +- p->regshift = val; ++ p->mapbase = regs->start; ++ p->mapsize = resource_size(regs); + +- err = device_property_read_u32(dev, "reg-io-width", &val); +- if (!err && val == 4) { +- p->iotype = UPIO_MEM32; ++ p->membase = devm_ioremap(dev, p->mapbase, p->mapsize); ++ if (!p->membase) ++ return -ENOMEM; ++ ++ err = uart_read_port_properties(p); ++ /* no interrupt -> fall back to polling */ ++ if (err == -ENXIO) ++ err = 0; ++ if (err) ++ return err; ++ ++ switch (p->iotype) { ++ case UPIO_MEM: ++ p->serial_in = dw8250_serial_in; ++ p->serial_out = dw8250_serial_out; ++ break; ++ case UPIO_MEM32: + p->serial_in = dw8250_serial_in32; + p->serial_out = dw8250_serial_out32; ++ break; ++ case UPIO_MEM32BE: ++ p->serial_in = dw8250_serial_in32be; ++ p->serial_out = dw8250_serial_out32be; ++ break; ++ default: ++ return -ENODEV; + } + + if (device_property_read_bool(dev, "dcd-override")) { +@@ -594,9 +618,6 @@ static int dw8250_probe(struct platform_device *pdev) + data->msr_mask_off |= UART_MSR_TERI; + } + +- /* Always ask for fixed clock rate from a property. */ +- device_property_read_u32(dev, "clock-frequency", &p->uartclk); +- + /* If there is separate baudclk, get the rate from it. 
*/ + data->clk = devm_clk_get_optional(dev, "baudclk"); + if (data->clk == NULL) +@@ -766,8 +787,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = { + + static const struct dw8250_platform_data dw8250_renesas_rzn1_data = { + .usr_reg = DW_UART_USR, +- .cpr_val = 0x00012f32, +- .quirks = DW_UART_QUIRK_IS_DMA_FC, ++ .cpr_value = 0x00012f32, ++ .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC, + }; + + static const struct dw8250_platform_data dw8250_skip_set_rate_data = { +@@ -775,6 +796,11 @@ static const struct dw8250_platform_data dw8250_skip_set_rate_data = { + .quirks = DW_UART_QUIRK_SKIP_SET_RATE, + }; + ++static const struct dw8250_platform_data dw8250_ultrarisc_dp1000_data = { ++ .usr_reg = DW_UART_USR, ++ .quirks = DW_UART_QUIRK_FIXED_TYPE, ++}; ++ + static const struct of_device_id dw8250_of_match[] = { + { .compatible = "snps,dw-apb-uart", .data = &dw8250_dw_apb }, + { .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data }, +@@ -782,17 +808,23 @@ static const struct of_device_id dw8250_of_match[] = { + { .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data }, + { .compatible = "sophgo,sg2044-uart", .data = &dw8250_skip_set_rate_data }, + { .compatible = "starfive,jh7100-uart", .data = &dw8250_skip_set_rate_data }, ++ { .compatible = "ultrarisc,dp1000-uart", .data = &dw8250_ultrarisc_dp1000_data }, + { /* Sentinel */ } + }; + MODULE_DEVICE_TABLE(of, dw8250_of_match); + ++static const struct dw8250_platform_data dw8250_apmc0d08 = { ++ .usr_reg = DW_UART_USR, ++ .quirks = DW_UART_QUIRK_APMC0D08, ++}; ++ + static const struct acpi_device_id dw8250_acpi_match[] = { + { "80860F0A", (kernel_ulong_t)&dw8250_dw_apb }, + { "8086228A", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMD0020", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb }, + { "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb }, +- { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb}, ++ { "APMC0D08", (kernel_ulong_t)&dw8250_apmc0d08 }, + { "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb }, + { "HISI0031", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT33C4", (kernel_ulong_t)&dw8250_dw_apb }, +@@ -800,6 +832,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { + { "INT3434", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT3435", (kernel_ulong_t)&dw8250_dw_apb }, + { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb }, ++ { "SOPH0002", (kernel_ulong_t)&dw8250_skip_set_rate_data }, + { }, + }; + MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); +diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c +index 84843e204a5e..8fc8b6753148 100644 +--- a/drivers/tty/serial/8250/8250_dwlib.c ++++ b/drivers/tty/serial/8250/8250_dwlib.c +@@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = { + void dw8250_setup_port(struct uart_port *p) + { + struct dw8250_port_data *pd = p->private_data; +- struct dw8250_data *data = to_dw8250_data(pd); + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg, old_dlf; + +@@ -284,7 +283,7 @@ void dw8250_setup_port(struct uart_port *p) + + reg = dw8250_readl_ext(p, DW_UART_CPR); + if (!reg) { +- reg = data->pdata->cpr_val; ++ reg = pd->cpr_value; + dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg); + } + if (!reg) +diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h +index f13e91f2cace..7dd2a8e7b780 100644 +--- a/drivers/tty/serial/8250/8250_dwlib.h ++++ b/drivers/tty/serial/8250/8250_dwlib.h +@@ -2,15 +2,10 @@ + /* Synopsys 
DesignWare 8250 library header file. */ + + #include +-#include + #include +-#include + + #include "8250.h" + +-struct clk; +-struct reset_control; +- + struct dw8250_port_data { + /* Port properties */ + int line; +@@ -19,42 +14,16 @@ struct dw8250_port_data { + struct uart_8250_dma dma; + + /* Hardware configuration */ ++ u32 cpr_value; + u8 dlf_size; + + /* RS485 variables */ + bool hw_rs485_support; + }; + +-struct dw8250_platform_data { +- u8 usr_reg; +- u32 cpr_val; +- unsigned int quirks; +-}; +- +-struct dw8250_data { +- struct dw8250_port_data data; +- const struct dw8250_platform_data *pdata; +- +- int msr_mask_on; +- int msr_mask_off; +- struct clk *clk; +- struct clk *pclk; +- struct notifier_block clk_notifier; +- struct work_struct clk_work; +- struct reset_control *rst; +- +- unsigned int skip_autocfg:1; +- unsigned int uart_16550_compatible:1; +-}; +- + void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old); + void dw8250_setup_port(struct uart_port *p); + +-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) +-{ +- return container_of(data, struct dw8250_data, data); +-} +- + static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) + { + if (p->iotype == UPIO_MEM32BE) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index a17803da83f8..dde4293f3109 100644 +index d5ad6cae6b65..87a7bbb8aa19 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1535,7 +1535,7 @@ static inline void __start_tx(struct uart_port *port) @@ -559650,7 +609575,7 @@ index a17803da83f8..dde4293f3109 100644 status = serial8250_rx_chars(up, status); } serial8250_modem_status(up); -@@ -2450,6 +2450,14 @@ int serial8250_do_startup(struct uart_port *port) +@@ -2451,6 +2451,14 @@ int serial8250_do_startup(struct uart_port *port) dev_warn_ratelimited(port->dev, "%s\n", msg); up->dma = NULL; } @@ -559665,11 +609590,2209 @@ index a17803da83f8..dde4293f3109 100644 } /* +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig +index bdc568a4ab66..6336fadff23e 100644 +--- a/drivers/tty/serial/Kconfig ++++ b/drivers/tty/serial/Kconfig +@@ -411,7 +411,8 @@ config SERIAL_PXA + depends on ARCH_PXA || ARCH_MMP + select SERIAL_CORE + select SERIAL_8250_PXA if SERIAL_8250=y +- select SERIAL_PXA_NON8250 if !SERIAL_8250=y ++ select SERIAL_PXA_NON8250 if !SERIAL_8250=y && !SOC_SPACEMIT_K1X ++ select SERIAL_PXA_K1X if !SERIAL_8250=y && SOC_SPACEMIT_K1X + help + If you have a machine based on an Intel XScale PXA2xx CPU you + can enable its onboard serial ports by enabling this option. +@@ -442,6 +443,22 @@ config SERIAL_PXA_CONSOLE + Unless you have a specific need, you should use SERIAL_8250_PXA + and SERIAL_8250_CONSOLE instead of this. + ++config SERIAL_SPACEMIT_K1X ++ bool "Spacemit k1x serial port support" ++ depends on SOC_SPACEMIT_K1X ++ depends on SERIAL_CORE ++ help ++ If you have a machine based on Spacemit k1x soc, ++ can enable its onboard serial port by enabling this option. ++ ++config SERIAL_SPACEMIT_K1X_CONSOLE ++ bool "Console on spacemit k1x serial port" ++ depends on SERIAL_SPACEMIT_K1X ++ depends on SERIAL_CORE_CONSOLE ++ help ++ If you have enabled the serial port on the Spacemit k1 chip, ++ you can make it the console by answering Y to this option. 
++ + config SERIAL_SA1100 + bool "SA1100 serial port support" + depends on ARCH_SA1100 +diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile +index 138abbc89738..0feb268753fb 100644 +--- a/drivers/tty/serial/Makefile ++++ b/drivers/tty/serial/Makefile +@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o + obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o + obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o + obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o ++obj-$(CONFIG_SERIAL_SPACEMIT_K1X) += spacemit_k1x_uart.o + obj-$(CONFIG_SERIAL_SA1100) += sa1100.o + obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o + obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o +diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c +index ed3953bd0407..469ad26cde48 100644 +--- a/drivers/tty/serial/serial_port.c ++++ b/drivers/tty/serial/serial_port.c +@@ -8,7 +8,10 @@ + + #include + #include ++#include ++#include + #include ++#include + #include + #include + +@@ -146,6 +149,148 @@ void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) + } + EXPORT_SYMBOL(uart_remove_one_port); + ++/** ++ * __uart_read_properties - read firmware properties of the given UART port ++ * @port: corresponding port ++ * @use_defaults: apply defaults (when %true) or validate the values (when %false) ++ * ++ * The following device properties are supported: ++ * - clock-frequency (optional) ++ * - fifo-size (optional) ++ * - no-loopback-test (optional) ++ * - reg-shift (defaults may apply) ++ * - reg-offset (value may be validated) ++ * - reg-io-width (defaults may apply or value may be validated) ++ * - interrupts (OF only) ++ * - serial [alias ID] (OF only) ++ * ++ * If the port->dev is of struct platform_device type the interrupt line ++ * will be retrieved via platform_get_irq() call against that device. ++ * Otherwise it will be assigned by fwnode_irq_get() call. In both cases ++ * the index 0 of the resource is used. ++ * ++ * The caller is responsible to initialize the following fields of the @port ++ * ->dev (must be valid) ++ * ->flags ++ * ->mapbase ++ * ->mapsize ++ * ->regshift (if @use_defaults is false) ++ * before calling this function. Alternatively the above mentioned fields ++ * may be zeroed, in such case the only ones, that have associated properties ++ * found, will be set to the respective values. ++ * ++ * If no error happened, the ->irq, ->mapbase, ->mapsize will be altered. ++ * The ->iotype is always altered. ++ * ++ * When @use_defaults is true and the respective property is not found ++ * the following values will be applied: ++ * ->regshift = 0 ++ * In this case IRQ must be provided, otherwise an error will be returned. ++ * ++ * When @use_defaults is false and the respective property is found ++ * the following values will be validated: ++ * - reg-io-width (->iotype) ++ * - reg-offset (->mapsize against ->mapbase) ++ * ++ * Returns: 0 on success or negative errno on failure ++ */ ++static int __uart_read_properties(struct uart_port *port, bool use_defaults) ++{ ++ struct device *dev = port->dev; ++ u32 value; ++ int ret; ++ ++ /* Read optional UART functional clock frequency */ ++ device_property_read_u32(dev, "clock-frequency", &port->uartclk); ++ ++ /* Read the registers alignment (default: 8-bit) */ ++ ret = device_property_read_u32(dev, "reg-shift", &value); ++ if (ret) ++ port->regshift = use_defaults ? 
0 : port->regshift; ++ else ++ port->regshift = value; ++ ++ /* Read the registers I/O access type (default: MMIO 8-bit) */ ++ ret = device_property_read_u32(dev, "reg-io-width", &value); ++ if (ret) { ++ port->iotype = UPIO_MEM; ++ } else { ++ switch (value) { ++ case 1: ++ port->iotype = UPIO_MEM; ++ break; ++ case 2: ++ port->iotype = UPIO_MEM16; ++ break; ++ case 4: ++ port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32; ++ break; ++ default: ++ if (!use_defaults) { ++ dev_err(dev, "Unsupported reg-io-width (%u)\n", value); ++ return -EINVAL; ++ } ++ port->iotype = UPIO_UNKNOWN; ++ break; ++ } ++ } ++ ++ /* Read the address mapping base offset (default: no offset) */ ++ ret = device_property_read_u32(dev, "reg-offset", &value); ++ if (ret) ++ value = 0; ++ ++ /* Check for shifted address mapping overflow */ ++ if (!use_defaults && port->mapsize < value) { ++ dev_err(dev, "reg-offset %u exceeds region size %pa\n", value, &port->mapsize); ++ return -EINVAL; ++ } ++ ++ port->mapbase += value; ++ port->mapsize -= value; ++ ++ /* Read optional FIFO size */ ++ device_property_read_u32(dev, "fifo-size", &port->fifosize); ++ ++ if (device_property_read_bool(dev, "no-loopback-test")) ++ port->flags |= UPF_SKIP_TEST; ++ ++ /* Get index of serial line, if found in DT aliases */ ++ ret = of_alias_get_id(dev_of_node(dev), "serial"); ++ if (ret >= 0) ++ port->line = ret; ++ ++ if (dev_is_platform(dev)) ++ ret = platform_get_irq(to_platform_device(dev), 0); ++ else ++ ret = fwnode_irq_get(dev_fwnode(dev), 0); ++ if (ret == -EPROBE_DEFER) ++ return ret; ++ if (ret > 0) ++ port->irq = ret; ++ else if (use_defaults) ++ /* By default IRQ support is mandatory */ ++ return ret; ++ else ++ port->irq = 0; ++ ++ port->flags |= UPF_SHARE_IRQ; ++ ++ return 0; ++} ++ ++int uart_read_port_properties(struct uart_port *port) ++{ ++ return __uart_read_properties(port, true); ++} ++EXPORT_SYMBOL_GPL(uart_read_port_properties); ++ ++int uart_read_and_validate_port_properties(struct uart_port *port) ++{ ++ return __uart_read_properties(port, false); ++} ++EXPORT_SYMBOL_GPL(uart_read_and_validate_port_properties); ++ + static struct device_driver serial_port_driver = { + .name = "port", + .suppress_bind_attrs = true, +diff --git a/drivers/tty/serial/spacemit_k1x_uart.c b/drivers/tty/serial/spacemit_k1x_uart.c +new file mode 100644 +index 000000000000..6fa51bc4be80 +--- /dev/null ++++ b/drivers/tty/serial/spacemit_k1x_uart.c +@@ -0,0 +1,1979 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Based on drivers/serial/8250.c by Russell King. ++ * ++ * Author: Nicolas Pitre ++ * Created: Feb 20, 2003 ++ * Copyright: (C) 2003 Monta Vista Software, Inc. ++ * Copyright: (C) 2023 Spacemit Co., Ltd. ++ * Note 1: This driver is made separate from the already too overloaded ++ * 8250.c because it needs some kirks of its own and that'll make it ++ * easier to add DMA support. ++ * ++ * Note 2: I'm too sick of device allocation policies for serial ports. ++ * If someone else wants to request an "official" allocation of major/minor ++ * for this driver please be my guest. And don't forget that new hardware ++ * to come from Intel might have more than 3 or 4 of those UARTs. Let's ++ * hope for a better port registration and dynamic device allocation scheme ++ * with the serial core maintainer satisfaction to appear soon. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DMA_BLOCK UART_XMIT_SIZE ++#define DMA_BURST_SIZE (8) ++#define DMA_FIFO_THRESHOLD (32) ++#define DMA_RX_BLOCK_SIZE DMA_BLOCK ++#define DMA_BUF_POLLING_SWITCH (1) ++ ++#define PXA_UART_TX (0) ++#define PXA_UART_RX (1) ++ ++#define NUM_UART_PORTS (10) ++#define BT_UART_PORT (2) ++ ++#define UART_FCR_PXA_BUS32 (0x20) ++#define UART_FCR_PXA_TRAIL (0x10) ++#define UART_FOR (9) ++ ++#define PXA_NAME_LEN (8) ++ ++#define SUPPORT_POWER_QOS (1) ++ ++#define TX_DMA_RUNNING BIT(0) ++#define RX_DMA_RUNNING BIT(1) ++ ++#define PXA_TIMER_TIMEOUT (3 * HZ) ++#define BLOCK_SUSPEND_TIMEOUT (3000) ++ ++struct uart_pxa_dma { ++ unsigned int dma_status; ++ struct dma_chan *txdma_chan; ++ struct dma_chan *rxdma_chan; ++ struct dma_async_tx_descriptor *rx_desc; ++ struct dma_async_tx_descriptor *tx_desc; ++ void *txdma_addr; ++ void *rxdma_addr; ++ dma_addr_t txdma_addr_phys; ++ dma_addr_t rxdma_addr_phys; ++ int tx_stop; ++ int rx_stop; ++ dma_cookie_t rx_cookie; ++ dma_cookie_t tx_cookie; ++ int tx_size; ++ struct tasklet_struct tklet; ++ ++#ifdef CONFIG_PM ++ void *tx_buf_save; ++ int tx_saved_len; ++#endif ++ ++ bool dma_init; ++ ++#if (DMA_BUF_POLLING_SWITCH == 1) ++ int dma_poll_timeout; ++ int dma_poll_max_time; ++#endif ++}; ++ ++struct uart_pxa_port { ++ struct uart_port port; ++ unsigned char ier; ++ unsigned char lcr; ++ unsigned char mcr; ++ unsigned int lsr_break_flag; ++ struct clk *fclk; ++ struct clk *gclk; ++ struct reset_control *resets; ++ char name[PXA_NAME_LEN]; ++ ++ struct timer_list pxa_timer; ++ int edge_wakeup_gpio; ++ struct work_struct uart_tx_lpm_work; ++ int dma_enable; ++ struct uart_pxa_dma uart_dma; ++ unsigned long flags; ++ unsigned int cons_udelay; ++ bool from_resume; ++ bool device_ctrl_rts; ++ bool in_resume; ++ unsigned int current_baud; ++}; ++ ++static void pxa_uart_transmit_dma_cb(void *data); ++static void pxa_uart_receive_dma_cb(void *data); ++static void pxa_uart_transmit_dma_start(struct uart_pxa_port *up, int count); ++static void pxa_uart_receive_dma_start(struct uart_pxa_port *up); ++static inline void wait_for_xmitr(struct uart_pxa_port *up); ++static unsigned int serial_pxa_tx_empty(struct uart_port *port); ++#ifdef CONFIG_PM ++static void _pxa_timer_handler(struct uart_pxa_port *up); ++#endif ++ ++static inline void stop_dma(struct uart_pxa_port *up, int read) ++{ ++ unsigned long flags; ++ struct uart_pxa_dma *pxa_dma = &up->uart_dma; ++ struct dma_chan *channel; ++ ++ if (!pxa_dma->dma_init) ++ return; ++ ++ channel = read ? 
pxa_dma->rxdma_chan : pxa_dma->txdma_chan; ++ ++ dmaengine_terminate_all(channel); ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (read) ++ pxa_dma->dma_status &= ~RX_DMA_RUNNING; ++ else ++ pxa_dma->dma_status &= ~TX_DMA_RUNNING; ++ spin_unlock_irqrestore(&up->port.lock, flags); ++} ++ ++static inline unsigned int serial_in(struct uart_pxa_port *up, int offset) ++{ ++ offset <<= 2; ++ return readl(up->port.membase + offset); ++} ++ ++static inline void serial_out(struct uart_pxa_port *up, int offset, int value) ++{ ++ offset <<= 2; ++ writel(value, up->port.membase + offset); ++} ++ ++static void serial_pxa_enable_ms(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ ++ if (up->dma_enable) ++ return; ++ ++ up->ier |= UART_IER_MSI; ++ serial_out(up, UART_IER, up->ier); ++} ++ ++static void serial_pxa_stop_tx(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned int timeout = 0x100000 / up->cons_udelay; ++ unsigned long flags; ++ ++ if (up->dma_enable) { ++ up->uart_dma.tx_stop = 1; ++ ++ if (up->ier & UART_IER_DMAE && up->uart_dma.dma_init) { ++ while (dma_async_is_tx_complete(up->uart_dma.txdma_chan, ++ up->uart_dma.tx_cookie, NULL, NULL) ++ != DMA_COMPLETE && (timeout-- > 0)) { ++ spin_unlock(&up->port.lock); ++ local_irq_save(flags); ++ local_irq_enable(); ++ udelay(up->cons_udelay); ++ local_irq_disable(); ++ local_irq_restore(flags); ++ spin_lock(&up->port.lock); ++ } ++ ++ WARN_ON_ONCE(timeout == 0); ++ } ++ } else { ++ if (up->ier & UART_IER_THRI) { ++ up->ier &= ~UART_IER_THRI; ++ serial_out(up, UART_IER, up->ier); ++ } ++ } ++} ++ ++static void serial_pxa_stop_rx(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ ++ if (up->dma_enable) { ++ if (up->ier & UART_IER_DMAE) { ++ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ stop_dma(up, PXA_UART_RX); ++ spin_lock_irqsave(&up->port.lock, up->flags); ++ } ++ up->uart_dma.rx_stop = 1; ++ } else { ++ up->ier &= ~UART_IER_RLSI; ++ up->port.read_status_mask &= ~UART_LSR_DR; ++ serial_out(up, UART_IER, up->ier); ++ } ++} ++ ++static inline void receive_chars(struct uart_pxa_port *up, int *status) ++{ ++ unsigned int ch, flag; ++ int max_count = 256; ++ ++ do { ++ spin_lock_irqsave(&up->port.lock, up->flags); ++ up->ier &= ~UART_IER_RTOIE; ++ serial_out(up, UART_IER, up->ier); ++ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ ++ ch = serial_in(up, UART_RX); ++ flag = TTY_NORMAL; ++ up->port.icount.rx++; ++ ++ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | ++ UART_LSR_FE | UART_LSR_OE))) { ++ if (*status & UART_LSR_BI) { ++ *status &= ~(UART_LSR_FE | UART_LSR_PE); ++ up->port.icount.brk++; ++ if (uart_handle_break(&up->port)) ++ goto ignore_char; ++ } else if (*status & UART_LSR_PE) { ++ up->port.icount.parity++; ++ } else if (*status & UART_LSR_FE) { ++ up->port.icount.frame++; ++ } ++ ++ if (*status & UART_LSR_OE) ++ up->port.icount.overrun++; ++ ++ *status &= up->port.read_status_mask; ++ ++#ifdef CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE ++ if (up->port.line == up->port.cons->index) { ++ *status |= up->lsr_break_flag; ++ up->lsr_break_flag = 0; ++ } ++#endif ++ if (*status & UART_LSR_BI) ++ flag = TTY_BREAK; ++ else if (*status & UART_LSR_PE) ++ flag = TTY_PARITY; ++ else if (*status & UART_LSR_FE) ++ flag = TTY_FRAME; ++ } ++ if (!uart_handle_sysrq_char(&up->port, ch)) ++ uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag); ++ ++ignore_char: ++ *status = serial_in(up, UART_LSR); ++ } while 
((*status & UART_LSR_DR) && (max_count-- > 0)); ++ tty_flip_buffer_push(&up->port.state->port); ++ ++ spin_lock_irqsave(&up->port.lock, up->flags); ++ up->ier |= UART_IER_RTOIE; ++ serial_out(up, UART_IER, up->ier); ++ spin_unlock_irqrestore(&up->port.lock, up->flags); ++} ++ ++static void transmit_chars(struct uart_pxa_port *up) ++{ ++ struct circ_buf *xmit = &up->port.state->xmit; ++ int count; ++ ++ if (up->port.x_char) { ++ serial_out(up, UART_TX, up->port.x_char); ++ up->port.icount.tx++; ++ up->port.x_char = 0; ++ return; ++ } ++ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { ++ spin_lock_irqsave(&up->port.lock, up->flags); ++ serial_pxa_stop_tx(&up->port); ++ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ return; ++ } ++ ++ count = up->port.fifosize / 2; ++ do { ++ serial_out(up, UART_TX, xmit->buf[xmit->tail]); ++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); ++ up->port.icount.tx++; ++ if (uart_circ_empty(xmit)) ++ break; ++ } while (--count > 0); ++ ++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) ++ uart_write_wakeup(&up->port); ++ ++ if (uart_circ_empty(xmit)) { ++ spin_lock_irqsave(&up->port.lock, up->flags); ++ serial_pxa_stop_tx(&up->port); ++ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ } ++} ++ ++static inline void dma_receive_chars(struct uart_pxa_port *up, int *status) ++{ ++ struct tty_port *port = &up->port.state->port; ++ unsigned char ch; ++ int max_count = 256; ++ int count = 0; ++ unsigned char *tmp; ++ unsigned int flag = TTY_NORMAL; ++ struct uart_pxa_dma *pxa_dma = &up->uart_dma; ++ struct dma_tx_state dma_state; ++ ++ if (!pxa_dma->dma_init) ++ return; ++ ++ dmaengine_pause(pxa_dma->rxdma_chan); ++ dmaengine_tx_status(pxa_dma->rxdma_chan, pxa_dma->rx_cookie, ++ &dma_state); ++ count = DMA_RX_BLOCK_SIZE - dma_state.residue; ++ tmp = pxa_dma->rxdma_addr; ++ if (up->port.sysrq) { ++ while (count > 0) { ++ if (!uart_handle_sysrq_char(&up->port, *tmp)) { ++ uart_insert_char(&up->port, *status, 0, *tmp, flag); ++ up->port.icount.rx++; ++ } ++ tmp++; ++ count--; ++ } ++ } else { ++ tty_insert_flip_string(port, tmp, count); ++ up->port.icount.rx += count; ++ } ++ ++ do { ++ ch = serial_in(up, UART_RX); ++ flag = TTY_NORMAL; ++ up->port.icount.rx++; ++ ++ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | ++ UART_LSR_FE | UART_LSR_OE))) { ++ if (*status & UART_LSR_BI) { ++ *status &= ~(UART_LSR_FE | UART_LSR_PE); ++ up->port.icount.brk++; ++ if (uart_handle_break(&up->port)) ++ goto ignore_char2; ++ } else if (*status & UART_LSR_PE) { ++ up->port.icount.parity++; ++ } else if (*status & UART_LSR_FE) { ++ up->port.icount.frame++; ++ } ++ ++ if (*status & UART_LSR_OE) ++ up->port.icount.overrun++; ++ ++ *status &= up->port.read_status_mask; ++ ++#ifdef CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE ++ if (up->port.line == up->port.cons->index) { ++ *status |= up->lsr_break_flag; ++ up->lsr_break_flag = 0; ++ } ++#endif ++ if (*status & UART_LSR_BI) ++ flag = TTY_BREAK; ++ else if (*status & UART_LSR_PE) ++ flag = TTY_PARITY; ++ else if (*status & UART_LSR_FE) ++ flag = TTY_FRAME; ++ } ++ if (!uart_handle_sysrq_char(&up->port, ch)) ++ uart_insert_char(&up->port, *status, UART_LSR_OE, ++ ch, flag); ++ignore_char2: ++ *status = serial_in(up, UART_LSR); ++ } while ((*status & UART_LSR_DR) && (max_count-- > 0)); ++ ++ tty_flip_buffer_push(port); ++ stop_dma(up, 1); ++ if (pxa_dma->rx_stop) ++ return; ++ pxa_uart_receive_dma_start(up); ++} ++ ++static void serial_pxa_start_tx(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct 
uart_pxa_port *)port; ++ ++ if (up->dma_enable) { ++ up->uart_dma.tx_stop = 0; ++ tasklet_schedule(&up->uart_dma.tklet); ++ } else { ++ if (!(up->ier & UART_IER_THRI)) { ++ up->ier |= UART_IER_THRI; ++ serial_out(up, UART_IER, up->ier); ++ } ++ } ++} ++ ++static inline void check_modem_status(struct uart_pxa_port *up) ++{ ++ int status; ++ ++ status = serial_in(up, UART_MSR); ++ ++ if ((status & UART_MSR_ANY_DELTA) == 0) ++ return; ++ ++ spin_lock(&up->port.lock); ++ if (status & UART_MSR_TERI) ++ up->port.icount.rng++; ++ if (status & UART_MSR_DDSR) ++ up->port.icount.dsr++; ++ if (status & UART_MSR_DDCD) ++ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); ++ if (status & UART_MSR_DCTS) ++ uart_handle_cts_change(&up->port, status & UART_MSR_CTS); ++ spin_unlock(&up->port.lock); ++ ++ wake_up_interruptible(&up->port.state->port.delta_msr_wait); ++} ++ ++static int serial_pxa_is_open(struct uart_pxa_port *up); ++ ++static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id) ++{ ++ struct uart_pxa_port *up = dev_id; ++ unsigned int iir, lsr; ++ ++ iir = serial_in(up, UART_IIR); ++ if (iir & UART_IIR_NO_INT) ++ return IRQ_NONE; ++ ++ if (!serial_pxa_is_open(up)) ++ return IRQ_HANDLED; ++ ++#ifdef CONFIG_PM ++#if SUPPORT_POWER_QOS ++ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) ++ pm_runtime_get_sync(up->port.dev); ++#endif ++#endif ++ ++ lsr = serial_in(up, UART_LSR); ++ if (up->dma_enable) { ++ if (lsr & UART_LSR_FIFOE) ++ dma_receive_chars(up, &lsr); ++ } else { ++ if (lsr & UART_LSR_DR) { ++ receive_chars(up, &lsr); ++ if (up->edge_wakeup_gpio >= 0) ++ pm_wakeup_event(up->port.dev, BLOCK_SUSPEND_TIMEOUT); ++ } ++ ++ check_modem_status(up); ++ if (lsr & UART_LSR_THRE) { ++ transmit_chars(up); ++ while (!serial_pxa_tx_empty((struct uart_port *)dev_id)) ++ ; ++ } ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static unsigned int serial_pxa_tx_empty(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned long flags; ++ unsigned int ret; ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (up->dma_enable) { ++ if (up->ier & UART_IER_DMAE) { ++ if (up->uart_dma.dma_status & TX_DMA_RUNNING) { ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ return 0; ++ } ++ } ++ } ++ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? 
TIOCSER_TEMT : 0; ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ ++ return ret; ++} ++ ++static unsigned int serial_pxa_get_mctrl(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned char status; ++ unsigned int ret; ++ ++ status = serial_in(up, UART_MSR); ++ ++ ret = 0; ++ if (status & UART_MSR_DCD) ++ ret |= TIOCM_CAR; ++ if (status & UART_MSR_RI) ++ ret |= TIOCM_RNG; ++ if (status & UART_MSR_DSR) ++ ret |= TIOCM_DSR; ++ if (status & UART_MSR_CTS) ++ ret |= TIOCM_CTS; ++ return ret; ++} ++ ++static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned char mcr = 0; ++ int hostwake = 0; ++ ++ if (mctrl & TIOCM_RTS) ++ mcr |= UART_MCR_RTS; ++ if (mctrl & TIOCM_DTR) ++ mcr |= UART_MCR_DTR; ++ if (mctrl & TIOCM_OUT1) ++ mcr |= UART_MCR_OUT1; ++ if (mctrl & TIOCM_OUT2) ++ mcr |= UART_MCR_OUT2; ++ if (mctrl & TIOCM_LOOP) ++ mcr |= UART_MCR_LOOP; ++ ++ if (up->device_ctrl_rts) { ++ if ((hostwake || up->in_resume) && (mctrl & TIOCM_RTS)) ++ mcr &= ~UART_MCR_RTS; ++ } ++ ++ mcr |= up->mcr; ++ ++ serial_out(up, UART_MCR, mcr); ++ ++#ifdef CONFIG_BT ++ if (up->port.line == BT_UART_PORT) ++ pr_debug("%s: rts: 0x%x\n", __func__, mcr & UART_MCR_RTS); ++#endif ++} ++ ++static void serial_pxa_break_ctl(struct uart_port *port, int break_state) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (break_state == -1) ++ up->lcr |= UART_LCR_SBC; ++ else ++ up->lcr &= ~UART_LCR_SBC; ++ serial_out(up, UART_LCR, up->lcr); ++ spin_unlock_irqrestore(&up->port.lock, flags); ++} ++ ++static void pxa_uart_transmit_dma_start(struct uart_pxa_port *up, int count) ++{ ++ struct uart_pxa_dma *pxa_dma = &up->uart_dma; ++ struct dma_slave_config slave_config; ++ int ret; ++ ++ if (!pxa_dma->txdma_chan) { ++ dev_err(up->port.dev, "tx dma channel is not initialized\n"); ++ return; ++ } ++ ++ slave_config.direction = DMA_MEM_TO_DEV; ++ slave_config.dst_addr = up->port.mapbase; ++ slave_config.dst_maxburst = DMA_BURST_SIZE; ++ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ ++ ret = dmaengine_slave_config(pxa_dma->txdma_chan, &slave_config); ++ if (ret) { ++ dev_err(up->port.dev, ++ "%s: dmaengine slave config err.\n", __func__); ++ return; ++ } ++ ++ pxa_dma->tx_size = count; ++ pxa_dma->tx_desc = ++ dmaengine_prep_slave_single(pxa_dma->txdma_chan, pxa_dma->txdma_addr_phys, ++ count, DMA_MEM_TO_DEV, 0); ++ if (!pxa_dma->tx_desc) { ++ dev_err(up->port.dev, ++ "%s: Unable to get desc for Tx\n", __func__); ++ return; ++ } ++ pxa_dma->tx_desc->callback = pxa_uart_transmit_dma_cb; ++ pxa_dma->tx_desc->callback_param = up; ++ ++ pxa_dma->tx_cookie = dmaengine_submit(pxa_dma->tx_desc); ++#ifdef CONFIG_PM ++#if SUPPORT_POWER_QOS ++ pm_runtime_get_sync(up->port.dev); ++#endif ++#endif ++ ++ dma_async_issue_pending(pxa_dma->txdma_chan); ++} ++ ++static void pxa_uart_receive_dma_start(struct uart_pxa_port *up) ++{ ++ unsigned long flags; ++ struct uart_pxa_dma *uart_dma = &up->uart_dma; ++ struct dma_slave_config slave_config; ++ int ret; ++ ++ if (!uart_dma->rxdma_chan) { ++ dev_err(up->port.dev, "rx dma channel is not initialized\n"); ++ return; ++ } ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (uart_dma->dma_status & RX_DMA_RUNNING) { ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ return; ++ } ++ uart_dma->dma_status |= RX_DMA_RUNNING; ++ spin_unlock_irqrestore(&up->port.lock, 
flags); ++ ++ slave_config.direction = DMA_DEV_TO_MEM; ++ slave_config.src_addr = up->port.mapbase; ++ slave_config.src_maxburst = DMA_BURST_SIZE; ++ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ ++ ret = dmaengine_slave_config(uart_dma->rxdma_chan, &slave_config); ++ if (ret) { ++ dev_err(up->port.dev, "%s: dmaengine slave config err.\n", __func__); ++ return; ++ } ++ ++ uart_dma->rx_desc = ++ dmaengine_prep_slave_single(uart_dma->rxdma_chan, ++ uart_dma->rxdma_addr_phys, DMA_RX_BLOCK_SIZE, ++ DMA_DEV_TO_MEM, 0); ++ if (!uart_dma->rx_desc) { ++ dev_err(up->port.dev, "%s: Unable to get desc for Rx\n", __func__); ++ return; ++ } ++ uart_dma->rx_desc->callback = pxa_uart_receive_dma_cb; ++ uart_dma->rx_desc->callback_param = up; ++ ++ uart_dma->rx_cookie = dmaengine_submit(uart_dma->rx_desc); ++ dma_async_issue_pending(uart_dma->rxdma_chan); ++} ++ ++static void pxa_uart_receive_dma_cb(void *data) ++{ ++ unsigned long flags; ++ struct uart_pxa_port *up = (struct uart_pxa_port *)data; ++ struct uart_pxa_dma *pxa_dma = &up->uart_dma; ++ struct tty_port *port = &up->port.state->port; ++ unsigned int count; ++ unsigned char *tmp = pxa_dma->rxdma_addr; ++ struct dma_tx_state dma_state; ++#if (DMA_BUF_POLLING_SWITCH == 1) ++ unsigned int buf_used, trail_cnt = 0; ++ unsigned char *trail_addr, *last_burst_addr; ++ u8 mark_1bytes = 0xff; ++ int timeout = 5000, cycle = 2; /* us */ ++ int times_1 = 0, times_2 = 0, duration_time_us; ++#endif ++ ++#ifdef CONFIG_PM ++#if SUPPORT_POWER_QOS ++ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) ++ pm_runtime_get_sync(up->port.dev); ++#endif ++#endif ++ ++ dmaengine_tx_status(pxa_dma->rxdma_chan, pxa_dma->rx_cookie, &dma_state); ++ count = DMA_RX_BLOCK_SIZE - dma_state.residue; ++ ++#if (DMA_BUF_POLLING_SWITCH == 1) ++ buf_used = count; ++ if (count > 0 && count < DMA_FIFO_THRESHOLD) { ++ trail_cnt = count; ++ trail_addr = tmp; ++ times_1 = timeout / cycle; ++ times_2 = timeout / cycle; ++ ++ while ((*trail_addr == mark_1bytes) && (times_1-- >= 0)) ++ udelay(cycle); ++ ++ if (trail_cnt > 1) { ++ trail_addr = trail_addr + trail_cnt - 1; ++ while ((*trail_addr == mark_1bytes) && (times_2-- >= 0)) ++ udelay(cycle); ++ } ++ ++ if (times_1 <= 0 || times_2 <= 0) ++ pxa_dma->dma_poll_timeout++; ++ } ++ ++ if (count >= DMA_FIFO_THRESHOLD && count < DMA_RX_BLOCK_SIZE) { ++ trail_cnt = (count % DMA_BURST_SIZE) + (DMA_FIFO_THRESHOLD - DMA_BURST_SIZE); ++ trail_addr = tmp + count - trail_cnt; ++ ++ #if (DMA_BURST_SIZE == DMA_FIFO_THRESHOLD) ++ if (trail_cnt == 0) { ++ trail_addr = tmp + count - DMA_BURST_SIZE; ++ trail_cnt = DMA_BURST_SIZE; ++ } ++ #endif ++ ++ times_1 = timeout / cycle; ++ times_2 = timeout / cycle; ++ ++ while ((*trail_addr == mark_1bytes) && (times_1-- >= 0)) ++ udelay(cycle); ++ ++ if (trail_cnt > 1) { ++ trail_addr = trail_addr + trail_cnt - 1; ++ while ((*trail_addr == mark_1bytes) && (times_2-- >= 0)) ++ udelay(cycle); ++ } ++ ++ if (times_1 <= 0 || times_2 <= 0) ++ pxa_dma->dma_poll_timeout++; ++ } ++ ++ if (count == DMA_RX_BLOCK_SIZE) { ++ last_burst_addr = tmp + DMA_RX_BLOCK_SIZE - DMA_BURST_SIZE; ++ trail_cnt = DMA_BURST_SIZE; ++ times_1 = timeout / cycle; ++ times_2 = timeout / cycle; ++ ++ while ((*last_burst_addr == mark_1bytes) && (times_1-- >= 0)) ++ udelay(cycle); ++ ++ if (trail_cnt > 1) { ++ last_burst_addr = tmp + DMA_RX_BLOCK_SIZE - 1; ++ while ((*last_burst_addr == mark_1bytes) && (times_2-- >= 0)) ++ udelay(cycle); ++ } ++ ++ if (times_1 <= 0 || times_2 <= 0) ++ pxa_dma->dma_poll_timeout++; ++ } ++#endif /* 
#if (DMA_BUF_POLLING_SWITCH == 1) */ ++ ++ if (up->port.sysrq) { ++ while (count > 0) { ++ if (!uart_handle_sysrq_char(&up->port, *tmp)) { ++ tty_insert_flip_char(port, *tmp, TTY_NORMAL); ++ up->port.icount.rx++; ++ } ++ tmp++; ++ count--; ++ } ++ } else { ++ tty_insert_flip_string(port, tmp, count); ++ up->port.icount.rx += count; ++ } ++ tty_flip_buffer_push(port); ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ pxa_dma->dma_status &= ~RX_DMA_RUNNING; ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ ++#if (DMA_BUF_POLLING_SWITCH == 1) ++ if (buf_used > 0) { ++ tmp = pxa_dma->rxdma_addr; ++ memset(tmp, mark_1bytes, buf_used); ++ } ++ ++ if (times_1 > 0) { ++ duration_time_us = (timeout / cycle - times_1) * cycle; ++ if (pxa_dma->dma_poll_max_time < duration_time_us) ++ pxa_dma->dma_poll_max_time = duration_time_us; ++ } ++ if (times_2 > 0) { ++ duration_time_us = (timeout / cycle - times_2) * cycle; ++ if (pxa_dma->dma_poll_max_time < duration_time_us) ++ pxa_dma->dma_poll_max_time = duration_time_us; ++ } ++ if (times_1 > 0 && times_2 > 0) { ++ duration_time_us = (2 * timeout / cycle - times_1 - times_2) * cycle; ++ if (pxa_dma->dma_poll_max_time < duration_time_us) ++ pxa_dma->dma_poll_max_time = duration_time_us; ++ } ++#endif /* #if (DMA_BUF_POLLING_SWITCH == 1) */ ++ ++ if (pxa_dma->rx_stop || !serial_pxa_is_open(up)) ++ return; ++ pxa_uart_receive_dma_start(up); ++ if (up->edge_wakeup_gpio >= 0) ++ pm_wakeup_event(up->port.dev, BLOCK_SUSPEND_TIMEOUT); ++} ++ ++static void pxa_uart_transmit_dma_cb(void *data) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)data; ++ struct uart_pxa_dma *pxa_dma = &up->uart_dma; ++ struct circ_buf *xmit = &up->port.state->xmit; ++ ++ if (up->from_resume) ++ up->from_resume = false; ++ ++ if (dma_async_is_tx_complete(pxa_dma->txdma_chan, pxa_dma->tx_cookie, ++ NULL, NULL) == DMA_COMPLETE) ++ schedule_work(&up->uart_tx_lpm_work); ++ ++ spin_lock_irqsave(&up->port.lock, up->flags); ++ pxa_dma->dma_status &= ~TX_DMA_RUNNING; ++ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ ++ if (pxa_dma->tx_stop || !serial_pxa_is_open(up)) ++ return; ++ ++ if (up->port.x_char) { ++ serial_out(up, UART_TX, up->port.x_char); ++ up->port.icount.tx++; ++ up->port.x_char = 0; ++ } ++ ++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) ++ uart_write_wakeup(&up->port); ++ ++ if (!uart_circ_empty(xmit)) ++ tasklet_schedule(&pxa_dma->tklet); ++} ++ ++static void pxa_uart_dma_init(struct uart_pxa_port *up) ++{ ++ struct uart_pxa_dma *pxa_dma = &up->uart_dma; ++ dma_cap_mask_t mask; ++ ++ dma_cap_zero(mask); ++ dma_cap_set(DMA_SLAVE, mask); ++ ++ if (!pxa_dma->rxdma_chan) { ++ pxa_dma->rxdma_chan = dma_request_slave_channel(up->port.dev, "rx"); ++ if (IS_ERR_OR_NULL(pxa_dma->rxdma_chan)) { ++ dev_WARN_ONCE(up->port.dev, 1, "failed to request rx dma channel\n"); ++ goto out; ++ } ++ } ++ ++ if (!pxa_dma->txdma_chan) { ++ pxa_dma->txdma_chan = dma_request_slave_channel(up->port.dev, "tx"); ++ if (IS_ERR_OR_NULL(pxa_dma->txdma_chan)) { ++ dev_WARN_ONCE(up->port.dev, 1, "failed to request tx dma channel\n"); ++ goto err_txdma; ++ } ++ } ++ ++ if (!pxa_dma->txdma_addr) { ++ pxa_dma->txdma_addr = ++ dma_direct_alloc(up->port.dev, DMA_BLOCK, ++ &pxa_dma->txdma_addr_phys, GFP_KERNEL, ++ DMA_ATTR_FORCE_CONTIGUOUS); ++ if (!pxa_dma->txdma_addr) { ++ dev_WARN_ONCE(up->port.dev, 1, "failed to allocate tx dma memory\n"); ++ goto txdma_err_alloc; ++ } ++ } ++ ++ if (!pxa_dma->rxdma_addr) { ++ pxa_dma->rxdma_addr = ++ dma_direct_alloc(up->port.dev, DMA_RX_BLOCK_SIZE, ++ 
&pxa_dma->rxdma_addr_phys, GFP_KERNEL, ++ DMA_ATTR_FORCE_CONTIGUOUS); ++ if (!pxa_dma->rxdma_addr) { ++ dev_WARN_ONCE(up->port.dev, 1, "failed to allocate rx dma memory\n"); ++ goto rxdma_err_alloc; ++ } ++ } ++ ++ pxa_dma->dma_status = 0; ++ pxa_dma->dma_init = true; ++ return; ++ ++rxdma_err_alloc: ++ dma_direct_free(up->port.dev, DMA_BLOCK, pxa_dma->txdma_addr, ++ pxa_dma->txdma_addr_phys, DMA_ATTR_FORCE_CONTIGUOUS); ++ pxa_dma->txdma_addr = NULL; ++txdma_err_alloc: ++ dma_release_channel(pxa_dma->txdma_chan); ++ pxa_dma->txdma_chan = NULL; ++err_txdma: ++ dma_release_channel(pxa_dma->rxdma_chan); ++ pxa_dma->rxdma_chan = NULL; ++out: ++ pxa_dma->dma_init = false; ++} ++ ++static void pxa_uart_dma_uninit(struct uart_pxa_port *up) ++{ ++ struct uart_pxa_dma *pxa_dma; ++ ++ pxa_dma = &up->uart_dma; ++ ++ stop_dma(up, PXA_UART_TX); ++ stop_dma(up, PXA_UART_RX); ++ ++ pxa_dma->dma_init = false; ++ ++ if (pxa_dma->txdma_chan) { ++ dma_release_channel(pxa_dma->txdma_chan); ++ pxa_dma->txdma_chan = NULL; ++ } ++ ++ if (pxa_dma->txdma_addr) { ++ dma_direct_free(up->port.dev, DMA_BLOCK, pxa_dma->txdma_addr, ++ pxa_dma->txdma_addr_phys, ++ DMA_ATTR_FORCE_CONTIGUOUS); ++ pxa_dma->txdma_addr = NULL; ++ } ++ ++ if (pxa_dma->rxdma_chan) { ++ dma_release_channel(pxa_dma->rxdma_chan); ++ pxa_dma->rxdma_chan = NULL; ++ } ++ ++ if (pxa_dma->rxdma_addr) { ++ dma_direct_free(up->port.dev, DMA_RX_BLOCK_SIZE, pxa_dma->rxdma_addr, ++ pxa_dma->rxdma_addr_phys, ++ DMA_ATTR_FORCE_CONTIGUOUS); ++ pxa_dma->rxdma_addr = NULL; ++ } ++} ++ ++static void uart_task_action(unsigned long data) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)data; ++ struct circ_buf *xmit = &up->port.state->xmit; ++ unsigned char *tmp = up->uart_dma.txdma_addr; ++ unsigned long flags; ++ int count = 0, c; ++ ++ if (up->uart_dma.tx_stop || up->port.suspended || ++ !serial_pxa_is_open(up) || up->from_resume) ++ return; ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (up->uart_dma.dma_status & TX_DMA_RUNNING) { ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ return; ++ } ++ ++ up->uart_dma.dma_status |= TX_DMA_RUNNING; ++ while (1) { ++ c = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); ++ if (c <= 0) ++ break; ++ ++ memcpy(tmp, xmit->buf + xmit->tail, c); ++ xmit->tail = (xmit->tail + c) & (UART_XMIT_SIZE - 1); ++ tmp += c; ++ count += c; ++ up->port.icount.tx += c; ++ } ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ ++ pr_debug("count =%d", count); ++ pxa_uart_transmit_dma_start(up, count); ++} ++ ++static int serial_pxa_startup(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned long flags; ++ int tmp = 0; ++ ++ if (port->line == 3) ++ up->mcr |= UART_MCR_AFE; ++ else ++ up->mcr = 0; ++ ++ up->port.uartclk = clk_get_rate(up->fclk); ++ ++ enable_irq(up->port.irq); ++ ++ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); ++ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | ++ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); ++ serial_out(up, UART_FCR, 0); ++ ++ (void)serial_in(up, UART_LSR); ++ (void)serial_in(up, UART_RX); ++ (void)serial_in(up, UART_IIR); ++ (void)serial_in(up, UART_MSR); ++ ++ serial_out(up, UART_LCR, UART_LCR_WLEN8); ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ up->port.mctrl |= TIOCM_OUT2; ++ tmp = serial_in(up, UART_MCR); ++ tmp |= TIOCM_OUT2; ++ serial_out(up, UART_MCR, tmp); ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ ++ if (up->dma_enable) { ++ pxa_uart_dma_init(up); ++ up->uart_dma.rx_stop = 0; ++ 
pxa_uart_receive_dma_start(up); ++ tasklet_init(&up->uart_dma.tklet, uart_task_action, (unsigned long)up); ++ } ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (up->dma_enable) ++ up->ier = UART_IER_DMAE | UART_IER_UUE; ++ else ++ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE; ++ serial_out(up, UART_IER, up->ier); ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ ++ (void)serial_in(up, UART_LSR); ++ (void)serial_in(up, UART_RX); ++ (void)serial_in(up, UART_IIR); ++ (void)serial_in(up, UART_MSR); ++ ++ return 0; ++} ++ ++static void serial_pxa_shutdown(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned long flags; ++ unsigned int tmp = 0; ++ ++ disable_irq(up->port.irq); ++ if (up->dma_enable) { ++ tasklet_kill(&up->uart_dma.tklet); ++ up->uart_dma.tx_stop = 1; ++ up->uart_dma.rx_stop = 1; ++ pxa_uart_dma_uninit(up); ++ } ++ ++ flush_work(&up->uart_tx_lpm_work); ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ up->ier = 0; ++ serial_out(up, UART_IER, 0); ++ ++ up->port.mctrl &= ~TIOCM_OUT2; ++ tmp = serial_in(up, UART_MCR); ++ tmp &= ~TIOCM_OUT2; ++ serial_out(up, UART_MCR, tmp); ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ ++ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); ++ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | ++ UART_FCR_CLEAR_RCVR | ++ UART_FCR_CLEAR_XMIT); ++ serial_out(up, UART_FCR, 0); ++} ++ ++static int pxa_set_baudrate_clk(struct uart_port *port, unsigned int baud) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned long rate; ++ int ret; ++ ++ if (up->current_baud == baud) ++ return 0; ++ ++ switch (baud) { ++ case 500000: ++ case 1000000: ++ case 1500000: ++ case 3000000: ++ rate = 48000000; ++ break; ++ case 576000: ++ case 1152000: ++ case 2500000: ++ case 4000000: ++ rate = 73000000; ++ break; ++ case 2000000: ++ case 3500000: ++ rate = 58000000; ++ break; ++ default: ++ rate = 14700000; ++ break; ++ } ++ ++ ret = clk_set_rate(up->fclk, rate); ++ if (ret < 0) { ++ dev_err(port->dev, ++ "Failed to set clk rate %lu\n", rate); ++ return ret; ++ } ++ ++ up->port.uartclk = clk_get_rate(up->fclk); ++ up->current_baud = baud; ++ ++ return 0; ++} ++ ++static void serial_pxa_set_termios(struct uart_port *port, ++ struct ktermios *termios, ++ const struct ktermios *old) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned char cval, fcr = 0; ++ unsigned long flags; ++ unsigned int baud, quot; ++ unsigned int dll; ++ int ret; ++ ++ if (up->dma_enable && up->uart_dma.dma_init) ++ stop_dma(up, PXA_UART_RX); ++ ++ cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag)); ++ ++ if (termios->c_cflag & CSTOPB) ++ cval |= UART_LCR_STOP; ++ if (termios->c_cflag & PARENB) ++ cval |= UART_LCR_PARITY; ++ if (!(termios->c_cflag & PARODD)) ++ cval |= UART_LCR_EPAR; ++ ++ baud = uart_get_baud_rate(port, termios, old, 0, 4000000); ++ if (!baud) ++ baud = 9600; ++ ret = pxa_set_baudrate_clk(port, baud); ++ if (ret < 0) { ++ dev_err(port->dev, "Failed to set baud rate clk: %d\n", ret); ++ return; ++ } ++ if (tty_termios_baud_rate(termios)) ++ tty_termios_encode_baud_rate(termios, baud, baud); ++ ++ quot = uart_get_divisor(port, baud); ++ ++ if (!quot) ++ quot = 1; ++ if (up->dma_enable) { ++ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32 | UART_FCR_PXA_TRAIL; ++ fcr &= ~UART_FCR_PXA_BUS32; ++ } else { ++ if ((up->port.uartclk / quot) < (2400 * 16)) ++ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1; ++ else if ((up->port.uartclk / quot) < 
(230400 * 16)) ++ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8; ++ else ++ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32; ++ } ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ up->ier |= UART_IER_UUE; ++ uart_update_timeout(port, termios->c_cflag, baud); ++ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; ++ if (termios->c_iflag & INPCK) ++ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; ++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) ++ up->port.read_status_mask |= UART_LSR_BI; ++ ++ up->port.ignore_status_mask = 0; ++ if (termios->c_iflag & IGNPAR) ++ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; ++ if (termios->c_iflag & IGNBRK) { ++ up->port.ignore_status_mask |= UART_LSR_BI; ++ if (termios->c_iflag & IGNPAR) ++ up->port.ignore_status_mask |= UART_LSR_OE; ++ } ++ ++ if ((termios->c_cflag & CREAD) == 0) ++ up->port.ignore_status_mask |= UART_LSR_DR; ++ ++ if (!up->dma_enable) { ++ up->ier &= ~UART_IER_MSI; ++ if (UART_ENABLE_MS(&up->port, termios->c_cflag)) ++ up->ier |= UART_IER_MSI; ++ } ++ ++ serial_out(up, UART_IER, up->ier); ++ ++ if (termios->c_cflag & CRTSCTS) ++ up->mcr |= UART_MCR_AFE; ++ else ++ up->mcr &= ~UART_MCR_AFE; ++ ++ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); ++ serial_out(up, UART_DLM, (quot >> 8) & 0xff); ++ (void)serial_in(up, UART_DLM); ++ serial_out(up, UART_DLL, quot & 0xff); ++ ++ (void)serial_in(up, UART_DLL); ++ dll = serial_in(up, UART_DLL); ++ WARN(dll != (quot & 0xff), "uart %d baud %d target 0x%x real 0x%x\n", ++ up->port.line, baud, quot & 0xff, dll); ++ ++ serial_out(up, UART_LCR, cval); ++ up->lcr = cval; ++ serial_pxa_set_mctrl(&up->port, up->port.mctrl); ++ serial_out(up, UART_FCR, fcr); ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ ++ if (uart_console(&up->port)) { ++ up->cons_udelay = 1000000000 / baud * 10 / 8 / 1000; ++ if (up->cons_udelay <= 0) ++ up->cons_udelay = 1; ++ if (up->cons_udelay > 20) ++ up->cons_udelay = 20; ++ } ++ ++ if (up->dma_enable && up->uart_dma.dma_init) ++ pxa_uart_receive_dma_start(up); ++} ++ ++static void serial_pxa_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ ++ if (!state) { ++ clk_prepare_enable(up->gclk); ++ clk_prepare_enable(up->fclk); ++ } else { ++ clk_disable_unprepare(up->fclk); ++ clk_disable_unprepare(up->gclk); ++ } ++} ++ ++static void serial_pxa_release_port(struct uart_port *port) ++{ ++} ++ ++static int serial_pxa_request_port(struct uart_port *port) ++{ ++ return 0; ++} ++ ++static void serial_pxa_config_port(struct uart_port *port, int flags) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ ++ up->port.type = PORT_PXA; ++} ++ ++static int serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser) ++{ ++ return -EINVAL; ++} ++ ++static const char *serial_pxa_type(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ ++ return up->name; ++} ++ ++static struct uart_pxa_port *serial_pxa_ports[NUM_UART_PORTS]; ++static struct uart_driver serial_pxa_reg; ++ ++#ifdef CONFIG_PM ++void serial_pxa_get_qos(int port) ++{ ++ struct uart_pxa_port *up; ++ ++ if (port < 0 || port >= NUM_UART_PORTS) { ++ pr_err("%s: wrong uart port %d\n", __func__, port); ++ return; ++ } ++ ++ up = serial_pxa_ports[port]; ++ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) ++ pm_runtime_get_sync(up->port.dev); ++} ++EXPORT_SYMBOL_GPL(serial_pxa_get_qos); ++#endif ++ ++void serial_pxa_assert_rts(int port) 
++{ ++ struct uart_pxa_port *up; ++ unsigned long flags; ++ ++ if (port < 0 || port >= NUM_UART_PORTS) { ++ pr_err("%s: wrong uart port %d\n", __func__, port); ++ return; ++ } ++ ++ up = serial_pxa_ports[port]; ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (!serial_pxa_is_open(up)) { ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ pr_err("%s: uart %d is shutdown\n", __func__, port); ++ return; ++ } ++ serial_pxa_set_mctrl(&up->port, up->port.mctrl | TIOCM_RTS); ++ uart_handle_cts_change(&up->port, UART_MSR_CTS); ++ spin_unlock_irqrestore(&up->port.lock, flags); ++} ++EXPORT_SYMBOL_GPL(serial_pxa_assert_rts); ++ ++void serial_pxa_deassert_rts(int port) ++{ ++ struct uart_pxa_port *up; ++ unsigned long flags; ++ ++ if (port < 0 || port >= NUM_UART_PORTS) { ++ pr_err("%s: wrong uart port %d\n", __func__, port); ++ return; ++ } ++ ++ up = serial_pxa_ports[port]; ++ ++ spin_lock_irqsave(&up->port.lock, flags); ++ if (!serial_pxa_is_open(up)) { ++ spin_unlock_irqrestore(&up->port.lock, flags); ++ pr_err("%s: uart %d is shutdown\n", __func__, port); ++ return; ++ } ++ serial_pxa_set_mctrl(&up->port, up->port.mctrl & ~TIOCM_RTS); ++ spin_unlock_irqrestore(&up->port.lock, flags); ++} ++EXPORT_SYMBOL_GPL(serial_pxa_deassert_rts); ++ ++#ifdef CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE ++ ++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) ++ ++static void wait_for_xmitr(struct uart_pxa_port *up) ++{ ++ unsigned int status, tmout = 10000; ++ unsigned int cycle; ++ ++ if (uart_console(&up->port)) ++ cycle = up->cons_udelay; ++ else ++ cycle = 1; ++ ++ tmout = 10000 / cycle; ++ ++ do { ++ status = serial_in(up, UART_LSR); ++ ++ if (status & UART_LSR_BI) ++ up->lsr_break_flag = UART_LSR_BI; ++ ++ if ((status & BOTH_EMPTY) == BOTH_EMPTY) ++ break; ++ udelay(cycle); ++ } while (--tmout); ++ ++ if (up->port.flags & UPF_CONS_FLOW) { ++ tmout = 1000000; ++ while (--tmout && ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) ++ udelay(cycle); ++ } ++ ++ if (!tmout) { ++ if (up->port.flags & UPF_CONS_FLOW) ++ status = serial_in(up, UART_MSR); ++ else ++ status = serial_in(up, UART_LSR); ++ panic("failed to read uart status, status:0x%08x\n", status); ++ } ++} ++ ++static void serial_pxa_console_putchar(struct uart_port *port, unsigned char ch) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ ++ wait_for_xmitr(up); ++ serial_out(up, UART_TX, ch); ++} ++ ++static void serial_pxa_console_write(struct console *co, const char *s, unsigned int count) ++{ ++ struct uart_pxa_port *up = serial_pxa_ports[co->index]; ++ unsigned int ier; ++ unsigned long flags; ++ int locked = 1; ++ ++ clk_enable(up->gclk); ++ clk_enable(up->fclk); ++ ++ local_irq_save(flags); ++ if (up->port.sysrq) ++ locked = 0; ++ else if (oops_in_progress) ++ locked = spin_trylock(&up->port.lock); ++ else ++ spin_lock(&up->port.lock); ++ ++ ier = serial_in(up, UART_IER); ++ serial_out(up, UART_IER, UART_IER_UUE); ++ ++ uart_console_write(&up->port, s, count, serial_pxa_console_putchar); ++ ++ wait_for_xmitr(up); ++ serial_out(up, UART_IER, ier); ++ ++ if (locked) ++ spin_unlock(&up->port.lock); ++ local_irq_restore(flags); ++ clk_disable(up->fclk); ++ clk_disable(up->gclk); ++} ++ ++#ifdef CONFIG_CONSOLE_POLL ++static int serial_pxa_get_poll_char(struct uart_port *port) ++{ ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned char lsr = serial_in(up, UART_LSR); ++ ++ while (!(lsr & UART_LSR_DR)) ++ lsr = serial_in(up, UART_LSR); ++ ++ return serial_in(up, UART_RX); ++} ++ ++static void 
serial_pxa_put_poll_char(struct uart_port *port, ++ unsigned char c) ++{ ++ unsigned int ier; ++ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ ++ ier = serial_in(up, UART_IER); ++ serial_out(up, UART_IER, UART_IER_UUE); ++ ++ wait_for_xmitr(up); ++ serial_out(up, UART_TX, c); ++ if (c == 10) { ++ wait_for_xmitr(up); ++ serial_out(up, UART_TX, 13); ++ } ++ ++ wait_for_xmitr(up); ++ serial_out(up, UART_IER, ier); ++} ++ ++#endif /* CONFIG_CONSOLE_POLL */ ++ ++static int __init ++serial_pxa_console_setup(struct console *co, char *options) ++{ ++ struct uart_pxa_port *up; ++ int baud = 9600; ++ int bits = 8; ++ int parity = 'n'; ++ int flow = 'n'; ++ ++ if (co->index == -1 || co->index >= serial_pxa_reg.nr) ++ co->index = 0; ++ up = serial_pxa_ports[co->index]; ++ if (!up) ++ return -ENODEV; ++ ++ if (options) ++ uart_parse_options(options, &baud, &parity, &bits, &flow); ++ ++ return uart_set_options(&up->port, co, baud, parity, bits, flow); ++} ++ ++static struct console serial_pxa_console = { ++ .name = "ttySP", ++ .write = serial_pxa_console_write, ++ .device = uart_console_device, ++ .setup = serial_pxa_console_setup, ++ .flags = CON_PRINTBUFFER, ++ .index = -1, ++ .data = &serial_pxa_reg, ++}; ++ ++static void pxa_early_write(struct console *con, const char *s, ++ unsigned int n) ++{ ++ struct earlycon_device *dev = con->data; ++ ++ uart_console_write(&dev->port, s, n, serial_pxa_console_putchar); ++} ++ ++static int __init pxa_early_console_setup(struct earlycon_device *device, const char *opt) ++{ ++ if (!device->port.membase) ++ return -ENODEV; ++ ++ device->con->write = pxa_early_write; ++ return 0; ++} ++ ++EARLYCON_DECLARE(pxa_serial, pxa_early_console_setup); ++OF_EARLYCON_DECLARE(pxa_serial, "spacemit,pxa-uart", pxa_early_console_setup); ++ ++#define PXA_CONSOLE (&serial_pxa_console) ++#else ++#define PXA_CONSOLE NULL ++#endif /* CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE */ ++ ++static const struct uart_ops serial_pxa_pops = { ++ .tx_empty = serial_pxa_tx_empty, ++ .set_mctrl = serial_pxa_set_mctrl, ++ .get_mctrl = serial_pxa_get_mctrl, ++ .stop_tx = serial_pxa_stop_tx, ++ .start_tx = serial_pxa_start_tx, ++ .stop_rx = serial_pxa_stop_rx, ++ .enable_ms = serial_pxa_enable_ms, ++ .break_ctl = serial_pxa_break_ctl, ++ .startup = serial_pxa_startup, ++ .shutdown = serial_pxa_shutdown, ++ .set_termios = serial_pxa_set_termios, ++ .pm = serial_pxa_pm, ++ .type = serial_pxa_type, ++ .release_port = serial_pxa_release_port, ++ .request_port = serial_pxa_request_port, ++ .config_port = serial_pxa_config_port, ++ .verify_port = serial_pxa_verify_port, ++#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE) ++ .poll_get_char = serial_pxa_get_poll_char, ++ .poll_put_char = serial_pxa_put_poll_char, ++#endif ++}; ++ ++static struct uart_driver serial_pxa_reg = { ++ .owner = THIS_MODULE, ++ .driver_name = "PXA serial", ++ .dev_name = "ttySP", ++ .major = TTY_MAJOR, ++ .minor = 128, ++ .nr = NUM_UART_PORTS, ++ .cons = PXA_CONSOLE, ++}; ++ ++static int serial_pxa_is_open(struct uart_pxa_port *up) ++{ ++ struct uart_state *state; ++ struct uart_pxa_dma *pxa_dma; ++ ++ if (!up) ++ return 0; ++ ++ state = serial_pxa_reg.state + up->port.line; ++ pxa_dma = &up->uart_dma; ++ ++ if (up->dma_enable) { ++ return ((up->ier & UART_IER_DMAE) && pxa_dma->dma_init && ++ (state->pm_state == UART_PM_STATE_ON)); ++ } else { ++ return (state->pm_state == UART_PM_STATE_ON); ++ } ++} ++ ++#ifdef CONFIG_PM ++ ++#ifdef CONFIG_HIBERNATION ++unsigned long pxa_clk_freq; ++struct clk 
*pxa_clk_parent; ++#endif ++ ++static int __maybe_unused serial_pxa_suspend(struct device *dev) ++{ ++ struct uart_pxa_port *sport = dev_get_drvdata(dev); ++ struct uart_pxa_dma *pxa_dma = &sport->uart_dma; ++ struct dma_tx_state dma_state; ++ unsigned char tmp[256]; ++ int fifo_cnt, cnt = 0; ++ ++ if (!console_suspend_enabled || !sport) ++ return 0; ++ ++ if (serial_pxa_is_open(sport) && sport->dma_enable) { ++ int sent = 0; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ pxa_dma->tx_stop = 1; ++ pxa_dma->rx_stop = 1; ++ pxa_dma->tx_saved_len = 0; ++ if (dma_async_is_tx_complete(pxa_dma->txdma_chan, ++ pxa_dma->tx_cookie, NULL, NULL) != DMA_COMPLETE) { ++ dmaengine_pause(pxa_dma->txdma_chan); ++ udelay(100); ++ dmaengine_tx_status(pxa_dma->txdma_chan, ++ pxa_dma->tx_cookie, &dma_state); ++ sent = pxa_dma->tx_size - dma_state.residue; ++ pxa_dma->tx_saved_len = dma_state.residue; ++ memcpy(pxa_dma->tx_buf_save, pxa_dma->txdma_addr + sent, ++ dma_state.residue); ++ stop_dma(sport, PXA_UART_TX); ++ } ++ ++ if (dma_async_is_tx_complete(pxa_dma->rxdma_chan, ++ pxa_dma->rx_cookie, NULL, NULL) != DMA_COMPLETE) { ++ spin_lock(&sport->port.lock); ++ serial_pxa_set_mctrl(&sport->port, sport->port.mctrl & ~TIOCM_RTS); ++ spin_unlock(&sport->port.lock); ++ udelay(100); ++ dmaengine_pause(pxa_dma->rxdma_chan); ++ udelay(100); ++ pxa_uart_receive_dma_cb(sport); ++ stop_dma(sport, PXA_UART_RX); ++ ++ fifo_cnt = serial_in(sport, UART_FOR); ++ while (fifo_cnt > 0) { ++ *(tmp + cnt) = serial_in(sport, UART_RX) & 0xff; ++ cnt++; ++ fifo_cnt = serial_in(sport, UART_FOR); ++ } ++ ++ if (cnt > 0) { ++ tty_insert_flip_string(&sport->port.state->port, tmp, cnt); ++ sport->port.icount.rx += cnt; ++ tty_flip_buffer_push(&sport->port.state->port); ++ } ++ } ++ local_irq_restore(flags); ++ } ++ ++ if (sport) { ++#ifdef CONFIG_HIBERNATION ++ pxa_clk_freq = clk_get_rate(sport->fclk); ++ pxa_clk_parent = clk_get_parent(sport->fclk); ++#endif ++ uart_suspend_port(&serial_pxa_reg, &sport->port); ++#ifdef CONFIG_HIBERNATION ++ clk_set_parent(sport->fclk, NULL); ++#endif ++ } ++ ++ if (del_timer_sync(&sport->pxa_timer)) ++ _pxa_timer_handler(sport); ++ ++ return 0; ++} ++ ++static int __maybe_unused serial_pxa_resume(struct device *dev) ++{ ++ struct uart_pxa_port *sport = dev_get_drvdata(dev); ++ struct uart_pxa_dma *pxa_dma = &sport->uart_dma; ++ ++ if (!console_suspend_enabled || !sport) ++ return 0; ++ ++ sport->in_resume = true; ++ ++#ifdef CONFIG_HIBERNATION ++ clk_set_parent(sport->fclk, pxa_clk_parent); ++ clk_set_rate(sport->fclk, pxa_clk_freq); ++#endif ++ uart_resume_port(&serial_pxa_reg, &sport->port); ++ ++ if (serial_pxa_is_open(sport) && sport->dma_enable) { ++ if (pxa_dma->tx_saved_len > 0) { ++ sport->from_resume = true; ++ memcpy(pxa_dma->txdma_addr, pxa_dma->tx_buf_save, ++ pxa_dma->tx_saved_len); ++ pxa_uart_transmit_dma_start(sport, ++ pxa_dma->tx_saved_len); ++ } else { ++ tasklet_schedule(&pxa_dma->tklet); ++ } ++ ++ pxa_uart_receive_dma_start(sport); ++ } ++ sport->in_resume = false; ++ ++ return 0; ++} ++ ++static SIMPLE_DEV_PM_OPS(serial_pxa_pm_ops, serial_pxa_suspend, serial_pxa_resume); ++ ++static void _pxa_timer_handler(struct uart_pxa_port *up) ++{ ++#if SUPPORT_POWER_QOS ++ pm_runtime_put_sync(up->port.dev); ++#endif ++} ++ ++static void pxa_timer_handler(struct timer_list *t) ++{ ++ struct uart_pxa_port *up = from_timer(up, t, pxa_timer); ++ ++ _pxa_timer_handler(up); ++} ++ ++static void __maybe_unused uart_edge_wakeup_handler(int gpio, void *data) ++{ ++ struct uart_pxa_port *up = 
(struct uart_pxa_port *)data; ++ ++ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) { ++#if SUPPORT_POWER_QOS ++ pm_runtime_get_sync(up->port.dev); ++#endif ++ } ++ pm_wakeup_event(up->port.dev, BLOCK_SUSPEND_TIMEOUT); ++} ++ ++static void uart_tx_lpm_handler(struct work_struct *work) ++{ ++ struct uart_pxa_port *up = container_of(work, struct uart_pxa_port, uart_tx_lpm_work); ++ ++ while (!(serial_in(up, UART_LSR) & UART_LSR_TEMT)) ++ usleep_range(1000, 2000); ++ ++#if SUPPORT_POWER_QOS ++ pm_runtime_put_sync(up->port.dev); ++#endif ++} ++#endif ++ ++static const struct of_device_id serial_k1x_dt_ids[] = { ++ { .compatible = "spacemit,k1x-uart", }, ++ {} ++}; ++ ++static int serial_pxa_probe_dt(struct platform_device *pdev, struct uart_pxa_port *sport) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ int ret; ++ ++ if (!np) ++ return 1; ++ ++ if (of_get_property(np, "dmas", NULL)) ++ sport->dma_enable = 1; ++ ++ ret = of_alias_get_id(np, "serial"); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); ++ return ret; ++ } ++ sport->port.line = ret; ++ ++#ifdef CONFIG_PM ++ if (of_property_read_u32(np, "edge-wakeup-pin", &sport->edge_wakeup_gpio)) ++ dev_dbg(&pdev->dev, "no edge-wakeup-pin defined\n"); ++#endif ++ sport->device_ctrl_rts = of_property_read_bool(np, "device-control-rts"); ++ ++ return 0; ++} ++ ++static int serial_pxa_probe(struct platform_device *dev) ++{ ++ struct uart_pxa_port *sport; ++ struct resource *mmres; ++ int ret; ++ int irq; ++ struct resource *dmares; ++ struct uart_pxa_dma *pxa_dma; ++ ++ mmres = platform_get_resource(dev, IORESOURCE_MEM, 0); ++ if (!mmres) ++ return -ENODEV; ++ ++ irq = platform_get_irq(dev, 0); ++ if (irq < 0) ++ return irq; ++ ++ sport = kzalloc(sizeof(*sport), GFP_KERNEL); ++ if (!sport) ++ return -ENOMEM; ++ ++#ifdef CONFIG_PM ++ sport->uart_dma.tx_buf_save = kmalloc(DMA_BLOCK, GFP_KERNEL); ++ if (!sport->uart_dma.tx_buf_save) { ++ kfree(sport); ++ return -ENOMEM; ++ } ++#endif ++ sport->gclk = devm_clk_get(&dev->dev, "gate"); ++ if (IS_ERR(sport->gclk)) { ++ ret = PTR_ERR(sport->gclk); ++ goto err_free; ++ } ++ ++ sport->fclk = devm_clk_get(&dev->dev, "func"); ++ if (IS_ERR(sport->fclk)) { ++ ret = PTR_ERR(sport->fclk); ++ goto err_free; ++ } ++ ++ if (sport->gclk) { ++ ret = clk_prepare(sport->gclk); ++ if (ret) { ++ clk_put(sport->gclk); ++ goto err_free; ++ } ++ } ++ ++ if (sport->fclk) { ++ ret = clk_prepare(sport->fclk); ++ if (ret) { ++ clk_put(sport->fclk); ++ goto err_free; ++ } ++ } ++ ++ sport->port.type = PORT_PXA; ++ sport->port.iotype = UPIO_MEM; ++ sport->port.mapbase = mmres->start; ++ sport->port.irq = irq; ++ sport->port.fifosize = 64; ++ sport->port.ops = &serial_pxa_pops; ++ sport->port.dev = &dev->dev; ++ sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; ++ sport->port.uartclk = clk_get_rate(sport->fclk); ++ sport->resets = devm_reset_control_get_optional(&dev->dev, NULL); ++ if (IS_ERR(sport->resets)) { ++ ret = PTR_ERR(sport->resets); ++ goto err_clk; ++ } ++ reset_control_deassert(sport->resets); ++ ++ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE); ++ ++ sport->edge_wakeup_gpio = -1; ++ ++ pxa_dma = &sport->uart_dma; ++ pxa_dma->txdma_chan = NULL; ++ pxa_dma->rxdma_chan = NULL; ++ pxa_dma->txdma_addr = NULL; ++ pxa_dma->rxdma_addr = NULL; ++ pxa_dma->dma_init = false; ++ sport->dma_enable = 0; ++ sport->cons_udelay = 1; ++ sport->in_resume = false; ++ ++ ret = serial_pxa_probe_dt(dev, sport); ++ if (ret > 0) ++ sport->port.line = dev->id; 
++ else if (ret < 0) ++ goto err_rst; ++ ++ if (sport->port.line >= ARRAY_SIZE(serial_pxa_ports)) { ++ dev_err(&dev->dev, "serial%d out of range\n", sport->port.line); ++ ret = -EINVAL; ++ goto err_rst; ++ } ++ snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1); ++ ++ dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); ++ dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(64)); ++ if (ret > 0 && sport->dma_enable) { ++ dmares = platform_get_resource(dev, IORESOURCE_DMA, 0); ++ if (dmares) { ++ dmares = platform_get_resource(dev, IORESOURCE_DMA, 1); ++ if (dmares) ++ sport->dma_enable = 1; ++ } ++ } ++ ++ ret = request_irq(sport->port.irq, serial_pxa_irq, 0, sport->name, sport); ++ if (ret) ++ goto err_rst; ++ ++ disable_irq(sport->port.irq); ++ ++#ifdef CONFIG_PM ++#if SUPPORT_POWER_QOS ++ pm_runtime_enable(&dev->dev); ++ pm_runtime_set_active(&dev->dev); ++ pm_runtime_irq_safe(&dev->dev); ++#endif ++#endif ++ ++ sport->port.membase = ioremap(mmres->start, resource_size(mmres)); ++ if (!sport->port.membase) { ++ ret = -ENOMEM; ++ goto err_qos; ++ } ++ ++#ifdef CONFIG_PM ++ INIT_WORK(&sport->uart_tx_lpm_work, uart_tx_lpm_handler); ++ ++ timer_setup(&sport->pxa_timer, pxa_timer_handler, 0); ++#endif ++ ++ serial_pxa_ports[sport->port.line] = sport; ++ uart_add_one_port(&serial_pxa_reg, &sport->port); ++ dev_dbg(&dev->dev, "uart clk_rate: %lu\n", clk_get_rate(sport->fclk)); ++ platform_set_drvdata(dev, sport); ++ ++ return 0; ++ ++#ifdef CONFIG_PM ++ uart_remove_one_port(&serial_pxa_reg, &sport->port); ++ iounmap(sport->port.membase); ++#endif ++err_qos: ++#ifdef CONFIG_PM ++ pm_runtime_disable(&dev->dev); ++#endif ++ free_irq(sport->port.irq, sport); ++err_rst: ++ reset_control_assert(sport->resets); ++err_clk: ++ clk_unprepare(sport->fclk); ++ clk_unprepare(sport->gclk); ++ clk_put(sport->fclk); ++ clk_put(sport->gclk); ++err_free: ++ kfree(sport); ++ return ret; ++} ++ ++static int serial_pxa_remove(struct platform_device *dev) ++{ ++ struct uart_pxa_port *sport = platform_get_drvdata(dev); ++ ++#ifdef CONFIG_PM ++ pm_runtime_disable(&dev->dev); ++#endif ++ ++ uart_remove_one_port(&serial_pxa_reg, &sport->port); ++ ++ reset_control_assert(sport->resets); ++ free_irq(sport->port.irq, sport); ++ clk_unprepare(sport->fclk); ++ clk_unprepare(sport->gclk); ++ clk_put(sport->fclk); ++ clk_put(sport->gclk); ++ ++#ifdef CONFIG_PM ++ kfree(sport->uart_dma.tx_buf_save); ++#endif ++ kfree(sport); ++ serial_pxa_ports[dev->id] = NULL; ++ ++ return 0; ++} ++ ++static struct platform_driver serial_pxa_driver = { ++ .probe = serial_pxa_probe, ++ .remove = serial_pxa_remove, ++ .driver = { ++ .name = "spacemit-k1x-uart", ++#ifdef CONFIG_PM ++ .pm = &serial_pxa_pm_ops, ++#endif ++ .suppress_bind_attrs = true, ++ .of_match_table = serial_k1x_dt_ids, ++ }, ++}; ++ ++static int __init serial_pxa_init(void) ++{ ++ int ret; ++ ++ ret = uart_register_driver(&serial_pxa_reg); ++ if (ret != 0) ++ return ret; ++ ++ ret = platform_driver_register(&serial_pxa_driver); ++ if (ret != 0) ++ uart_unregister_driver(&serial_pxa_reg); ++ ++ return ret; ++} ++ ++static void __exit serial_pxa_exit(void) ++{ ++ platform_driver_unregister(&serial_pxa_driver); ++ uart_unregister_driver(&serial_pxa_reg); ++} ++module_init(serial_pxa_init); ++module_exit(serial_pxa_exit); ++ diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c -index 3ec391d06020..8a684e489ca9 100644 +index 3491de5272e8..ed952dc4ce3b 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c -@@ -1814,8 +1814,8 @@ static int 
ufs_qcom_config_esi(struct ufs_hba *hba) +@@ -1812,8 +1812,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) * 2. Poll queues do not need ESI. */ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; @@ -559680,7 +611803,7 @@ index 3ec391d06020..8a684e489ca9 100644 if (ret) { dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); goto out; -@@ -1844,7 +1844,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) +@@ -1842,7 +1842,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) devm_free_irq(hba->dev, desc->irq, hba); } msi_unlock_descs(hba->dev); @@ -559689,7 +611812,7 @@ index 3ec391d06020..8a684e489ca9 100644 } else { if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && host->hw_ver.step == 0) { -@@ -1924,7 +1924,8 @@ static void ufs_qcom_remove(struct platform_device *pdev) +@@ -1922,7 +1922,8 @@ static void ufs_qcom_remove(struct platform_device *pdev) ufshcd_pltfrm_remove(pdev); if (host->esi_enabled) @@ -559739,7 +611862,7 @@ index fe1493d4bbe5..9422571d469d 100644 +obj-$(CONFIG_USB_DWC3_RTK) += dwc3-rtk.o +obj-$(CONFIG_USB_DWC3_XUANTIE) += dwc3-xuantie.o diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c -index fcb509059d7c..4b1dd437e5f2 100644 +index 30404461ef7d..7f1a85f471e5 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -42,6 +42,14 @@ @@ -559757,7 +611880,7 @@ index fcb509059d7c..4b1dd437e5f2 100644 /** * dwc3_get_dr_mode - Validates and sets dr_mode * @dwc: pointer to our context structure -@@ -1520,6 +1528,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) +@@ -1533,6 +1541,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) */ hird_threshold = 12; @@ -559767,7 +611890,7 @@ index fcb509059d7c..4b1dd437e5f2 100644 /* * default to a TXFIFO size large enough to fit 6 max packets. 
This * allows for systems with larger bus latencies to have some headroom -@@ -1527,11 +1538,16 @@ static void dwc3_get_properties(struct dwc3 *dwc) +@@ -1540,11 +1551,16 @@ static void dwc3_get_properties(struct dwc3 *dwc) */ tx_fifo_resize_max_num = 6; @@ -560561,6 +612684,113 @@ index 000000000000..80cb0b1e3d64 +MODULE_AUTHOR("Wei.Liu "); +MODULE_DESCRIPTION("PMIC Watchdog Driver for TH1520"); +MODULE_LICENSE("GPL"); +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h +index 7c49c8a35005..396909300897 100644 +--- a/include/acpi/acpi_bus.h ++++ b/include/acpi/acpi_bus.h +@@ -13,11 +13,9 @@ + #include + #include + +-/* TBD: Make dynamic */ +-#define ACPI_MAX_HANDLES 10 + struct acpi_handle_list { + u32 count; +- acpi_handle handles[ACPI_MAX_HANDLES]; ++ acpi_handle *handles; + }; + + /* acpi_utils.h */ +@@ -28,11 +26,14 @@ acpi_status + acpi_evaluate_integer(acpi_handle handle, + acpi_string pathname, + struct acpi_object_list *arguments, unsigned long long *data); +-acpi_status +-acpi_evaluate_reference(acpi_handle handle, +- acpi_string pathname, +- struct acpi_object_list *arguments, +- struct acpi_handle_list *list); ++bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, ++ struct acpi_object_list *arguments, ++ struct acpi_handle_list *list); ++bool acpi_handle_list_equal(struct acpi_handle_list *list1, ++ struct acpi_handle_list *list2); ++void acpi_handle_list_replace(struct acpi_handle_list *dst, ++ struct acpi_handle_list *src); ++void acpi_handle_list_free(struct acpi_handle_list *list); + acpi_status + acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, + struct acpi_buffer *status_buf); +@@ -369,6 +370,24 @@ struct acpi_device_data { + + struct acpi_gpio_mapping; + ++struct acpi_device_software_node_port { ++ unsigned int port_nr; ++}; ++ ++/** ++ * struct acpi_device_software_nodes - Software nodes for an ACPI device ++ * @nodes: Software nodes for root as well as ports and endpoints. ++ * @nodeprts: Array of software node pointers, for (un)registering them. ++ * @ports: Information related to each port and endpoint within a port. ++ * @num_ports: The number of ports. 
++ */ ++struct acpi_device_software_nodes { ++ struct software_node *nodes; ++ const struct software_node **nodeptrs; ++ struct acpi_device_software_node_port *ports; ++ unsigned int num_ports; ++}; ++ + /* Device */ + struct acpi_device { + u32 pld_crc; +@@ -830,6 +849,8 @@ static inline void acpi_put_acpi_dev(struct acpi_device *adev) + + int acpi_wait_for_acpi_ipmi(void); + ++int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices); ++u32 arch_acpi_add_auto_dep(acpi_handle handle); + #else /* CONFIG_ACPI */ + + static inline int register_acpi_bus_type(void *bus) { return 0; } +diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h +index c080d579a546..e32149d605dc 100644 +--- a/include/acpi/actbl3.h ++++ b/include/acpi/actbl3.h +@@ -192,7 +192,8 @@ enum acpi_srat_type { + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */ + ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */ + ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */ +- ACPI_SRAT_TYPE_RESERVED = 7 /* 7 and greater are reserved */ ++ ACPI_SRAT_TYPE_RINTC_AFFINITY = 7, /* ACPI 6.6 */ ++ ACPI_SRAT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ + }; + + /* +@@ -296,6 +297,21 @@ struct acpi_srat_generic_affinity { + #define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */ + #define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */ + ++/* 7: RINTC Affinity Structure(ACPI 6.6) */ ++ ++struct acpi_srat_rintc_affinity { ++ struct acpi_subtable_header header; ++ u16 reserved; ++ u32 proximity_domain; ++ u32 acpi_processor_uid; ++ u32 flags; ++ u32 clock_domain; ++}; ++ ++/* Flags for ACPI_SRAT_RINTC_AFFINITY */ ++ ++#define ACPI_SRAT_RINTC_ENABLED (1) /* 00: Use affinity structure */ ++ + /******************************************************************************* + * + * STAO - Status Override Table (_STA override) - ACPI 6.0 diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a753849..879e5f8aa5e9 100644 --- a/include/asm-generic/pgalloc.h @@ -560795,6 +613025,235 @@ index 000000000000..5584243f9135 +#define SAFE_MODE 2 +#define BYPASS_MODE 3 +#endif +diff --git a/include/dt-bindings/clock/spacemit-k1x-clock.h b/include/dt-bindings/clock/spacemit-k1x-clock.h +new file mode 100644 +index 000000000000..5dd92a6cde71 +--- /dev/null ++++ b/include/dt-bindings/clock/spacemit-k1x-clock.h +@@ -0,0 +1,223 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (c) 2022 Spacemit, Inc ++ */ ++ ++#ifndef _DT_BINDINGS_CLK_SPACEMIT_K1X_H_ ++#define _DT_BINDINGS_CLK_SPACEMIT_K1X_H_ ++ ++#define CLK_PLL2 0 ++#define CLK_PLL3 1 ++#define CLK_PLL1_D2 2 ++#define CLK_PLL1_D3 3 ++#define CLK_PLL1_D4 4 ++#define CLK_PLL1_D5 5 ++#define CLK_PLL1_D6 6 ++#define CLK_PLL1_D7 7 ++#define CLK_PLL1_D8 8 ++#define CLK_PLL1_D11 9 ++#define CLK_PLL1_D13 10 ++#define CLK_PLL1_D23 11 ++#define CLK_PLL1_D64 12 ++#define CLK_PLL1_D10_AUD 13 ++#define CLK_PLL1_D100_AUD 14 ++#define CLK_PLL2_D1 15 ++#define CLK_PLL2_D2 16 ++#define CLK_PLL2_D3 17 ++#define CLK_PLL2_D4 18 ++#define CLK_PLL2_D5 19 ++#define CLK_PLL2_D6 20 ++#define CLK_PLL2_D7 21 ++#define CLK_PLL2_D8 22 ++#define CLK_PLL3_D1 23 ++#define CLK_PLL3_D2 24 ++#define CLK_PLL3_D3 25 ++#define CLK_PLL3_D4 26 ++#define CLK_PLL3_D5 27 ++#define CLK_PLL3_D6 28 ++#define CLK_PLL3_D7 29 ++#define CLK_PLL3_D8 30 ++#define CLK_PLL1_307P2 31 ++#define CLK_PLL1_76P8 32 ++#define CLK_PLL1_61P44 33 ++#define CLK_PLL1_153P6 34 ++#define CLK_PLL1_102P4 35 ++#define CLK_PLL1_51P2 36 ++#define CLK_PLL1_51P2_AP 37 
++#define CLK_PLL1_57P6 38 ++#define CLK_PLL1_25P6 39 ++#define CLK_PLL1_12P8 40 ++#define CLK_PLL1_12P8_WDT 41 ++#define CLK_PLL1_6P4 42 ++#define CLK_PLL1_3P2 43 ++#define CLK_PLL1_1P6 44 ++#define CLK_PLL1_0P8 45 ++#define CLK_PLL1_351 46 ++#define CLK_PLL1_409P6 47 ++#define CLK_PLL1_204P8 48 ++#define CLK_PLL1_491 49 ++#define CLK_PLL1_245P76 50 ++#define CLK_PLL1_614 51 ++#define CLK_PLL1_47P26 52 ++#define CLK_PLL1_31P5 53 ++#define CLK_PLL1_819 54 ++#define CLK_PLL1_1228 55 ++#define CLK_SLOW_UART1 56 ++#define CLK_SLOW_UART2 57 ++#define CLK_UART1 58 ++#define CLK_UART2 59 ++#define CLK_UART3 60 ++#define CLK_UART4 61 ++#define CLK_UART5 62 ++#define CLK_UART6 63 ++#define CLK_UART7 64 ++#define CLK_UART8 65 ++#define CLK_UART9 66 ++#define CLK_GPIO 67 ++#define CLK_PWM0 68 ++#define CLK_PWM1 69 ++#define CLK_PWM2 70 ++#define CLK_PWM3 71 ++#define CLK_PWM4 72 ++#define CLK_PWM5 73 ++#define CLK_PWM6 74 ++#define CLK_PWM7 75 ++#define CLK_PWM8 76 ++#define CLK_PWM9 77 ++#define CLK_PWM10 78 ++#define CLK_PWM11 79 ++#define CLK_PWM12 80 ++#define CLK_PWM13 81 ++#define CLK_PWM14 82 ++#define CLK_PWM15 83 ++#define CLK_PWM16 84 ++#define CLK_PWM17 85 ++#define CLK_PWM18 86 ++#define CLK_PWM19 87 ++#define CLK_SSP3 88 ++#define CLK_RTC 89 ++#define CLK_TWSI0 90 ++#define CLK_TWSI1 91 ++#define CLK_TWSI2 92 ++#define CLK_TWSI4 93 ++#define CLK_TWSI5 94 ++#define CLK_TWSI6 95 ++#define CLK_TWSI7 96 ++#define CLK_TWSI8 97 ++#define CLK_TIMERS1 98 ++#define CLK_TIMERS2 99 ++#define CLK_AIB 100 ++#define CLK_ONEWIRE 101 ++#define CLK_SSPA0 102 ++#define CLK_SSPA1 103 ++#define CLK_DRO 104 ++#define CLK_IR 105 ++#define CLK_TSEN 106 ++#define CLK_IPC_AP2AUD 107 ++#define CLK_CAN0 108 ++#define CLK_CAN0_BUS 109 ++#define CLK_WDT 110 ++#define CLK_RIPC 111 ++#define CLK_JPG 112 ++#define CLK_JPF_4KAFBC 113 ++#define CLK_JPF_2KAFBC 114 ++#define CLK_CCIC2PHY 115 ++#define CLK_CCIC3PHY 116 ++#define CLK_CSI 117 ++#define CLK_CAMM0 118 ++#define CLK_CAMM1 119 ++#define CLK_CAMM2 120 ++#define CLK_ISP_CPP 121 ++#define CLK_ISP_BUS 122 ++#define CLK_ISP 123 ++#define CLK_DPU_MCLK 124 ++#define CLK_DPU_ESC 125 ++#define CLK_DPU_BIT 126 ++#define CLK_DPU_PXCLK 127 ++#define CLK_DPU_HCLK 128 ++#define CLK_DPU_SPI 129 ++#define CLK_DPU_SPI_HBUS 130 ++#define CLK_DPU_SPIBUS 131 ++#define CLK_SPU_SPI_ACLK 132 ++#define CLK_V2D 133 ++#define CLK_CCIC_4X 134 ++#define CLK_CCIC1PHY 135 ++#define CLK_SDH_AXI 136 ++#define CLK_SDH0 137 ++#define CLK_SDH1 138 ++#define CLK_SDH2 139 ++#define CLK_USB_P1 140 ++#define CLK_USB_AXI 141 ++#define CLK_USB30 142 ++#define CLK_QSPI 143 ++#define CLK_QSPI_BUS 144 ++#define CLK_DMA 145 ++#define CLK_AES 146 ++#define CLK_VPU 147 ++#define CLK_GPU 148 ++#define CLK_EMMC 149 ++#define CLK_EMMC_X 150 ++#define CLK_AUDIO 151 ++#define CLK_HDMI 152 ++#define CLK_CCI550 153 ++#define CLK_PMUA_ACLK 154 ++#define CLK_CPU_C0_HI 155 ++#define CLK_CPU_C0_CORE 156 ++#define CLK_CPU_C0_ACE 157 ++#define CLK_CPU_C0_TCM 158 ++#define CLK_CPU_C1_HI 159 ++#define CLK_CPU_C1_CORE 160 ++#define CLK_CPU_C1_ACE 161 ++#define CLK_PCIE0 162 ++#define CLK_PCIE1 163 ++#define CLK_PCIE2 164 ++#define CLK_EMAC0_BUS 165 ++#define CLK_EMAC0_PTP 166 ++#define CLK_EMAC1_BUS 167 ++#define CLK_EMAC1_PTP 168 ++#define CLK_SEC_UART1 169 ++#define CLK_SEC_SSP2 170 ++#define CLK_SEC_TWSI3 171 ++#define CLK_SEC_RTC 172 ++#define CLK_SEC_TIMERS0 173 ++#define CLK_SEC_KPC 174 ++#define CLK_SEC_GPIO 175 ++#define CLK_APB 176 ++#define CLK_PLL3_80 177 ++#define CLK_PLL3_40 178 ++#define CLK_PLL3_20 179 
++#define CLK_SLOW_UART 180 ++#define CLK_I2S_SYSCLK 181 ++#define CLK_I2S_BCLK 182 ++#define CLK_RCPU_HDMIAUDIO 183 ++#define CLK_RCPU_CAN 184 ++#define CLK_RCPU_CAN_BUS 185 ++#define CLK_RCPU_I2C0 186 ++#define CLK_RCPU_SSP0 187 ++#define CLK_RCPU_IR 188 ++#define CLK_RCPU_UART0 189 ++#define CLK_RCPU_UART1 190 ++#define CLK_DPLL1 191 ++#define CLK_DPLL2 192 ++#define CLK_DFC_LVL0 193 ++#define CLK_DFC_LVL1 194 ++#define CLK_DFC_LVL2 195 ++#define CLK_DFC_LVL3 196 ++#define CLK_DFC_LVL4 197 ++#define CLK_DFC_LVL5 198 ++#define CLK_DFC_LVL6 199 ++#define CLK_DFC_LVL7 200 ++#define CLK_DDR 201 ++#define CLK_RCPU2_PWM0 202 ++#define CLK_RCPU2_PWM1 203 ++#define CLK_RCPU2_PWM2 204 ++#define CLK_RCPU2_PWM3 205 ++#define CLK_RCPU2_PWM4 206 ++#define CLK_RCPU2_PWM5 207 ++#define CLK_RCPU2_PWM6 208 ++#define CLK_RCPU2_PWM7 209 ++#define CLK_RCPU2_PWM8 210 ++#define CLK_RCPU2_PWM9 211 ++#define CLK_MAX_NO 212 ++ ++#endif /* _DT_BINDINGS_CLK_SPACEMIT_K1X_H_ */ diff --git a/include/dt-bindings/clock/th1520-audiosys.h b/include/dt-bindings/clock/th1520-audiosys.h new file mode 100644 index 000000000000..2001545b68b8 @@ -561567,6 +614026,66 @@ index 000000000000..1dd3bbb78da6 + +#endif + +diff --git a/include/dt-bindings/dma/spacemit-k1-dma.h b/include/dt-bindings/dma/spacemit-k1-dma.h +new file mode 100644 +index 000000000000..43b6904b912a +--- /dev/null ++++ b/include/dt-bindings/dma/spacemit-k1-dma.h +@@ -0,0 +1,54 @@ ++/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ ++/* ++ * Request type numbers for the Spacemit k1 DMA. ++ * ++ * Copyright (C) 2023 Spacemit ++ */ ++ ++#ifndef __DT_BINDINGS_DMA_SMACEMIT_K1_H__ ++#define __DT_BINDINGS_DMA_SMACEMIT_K1_H__ ++ ++#define DMA_UART0_TX 3 ++#define DMA_UART0_RX 4 ++#define DMA_UART2_TX 5 ++#define DMA_UART2_RX 6 ++#define DMA_UART3_TX 7 ++#define DMA_UART3_RX 8 ++#define DMA_UART4_TX 9 ++#define DMA_UART4_RX 10 ++#define DMA_I2C0_TX 11 ++#define DMA_I2C0_RX 12 ++#define DMA_I2C1_TX 13 ++#define DMA_I2C1_RX 14 ++#define DMA_I2C2_TX 15 ++#define DMA_I2C2_RX 16 ++#define DMA_I2C4_TX 17 ++#define DMA_I2C4_RX 18 ++#define DMA_SSP3_TX 19 ++#define DMA_SSP3_RX 20 ++#define DMA_SSPA0_TX 21 ++#define DMA_SSPA0_RX 22 ++#define DMA_SSPA1_TX 23 ++#define DMA_SSPA1_RX 24 ++#define DMA_UART5_TX 25 ++#define DMA_UART5_RX 26 ++#define DMA_UART6_TX 27 ++#define DMA_UART6_RX 28 ++#define DMA_UART7_TX 29 ++#define DMA_UART7_RX 30 ++#define DMA_UART8_TX 31 ++#define DMA_UART8_RX 32 ++#define DMA_UART9_TX 33 ++#define DMA_UART9_RX 34 ++#define DMA_I2C5_TX 35 ++#define DMA_I2C5_RX 36 ++#define DMA_I2C6_TX 37 ++#define DMA_I2C6_RX 38 ++#define DMA_I2C7_TX 39 ++#define DMA_I2C7_RX 40 ++#define DMA_I2C8_TX 41 ++#define DMA_I2C8_RX 42 ++#define DMA_QSPI_RX 44 ++#define DMA_QSPI_TX 45 ++ ++#endif /* __DT_BINDINGS_DMA_SMACEMIT_K1_H__ */ diff --git a/include/dt-bindings/firmware/xuantie/rsrc.h b/include/dt-bindings/firmware/xuantie/rsrc.h new file mode 100644 index 000000000000..58789da8a237 @@ -561591,6 +614110,348 @@ index 000000000000..58789da8a237 +#define TH1520_AON_R_LAST 7 + +#endif +diff --git a/include/dt-bindings/mmc/spacemit-k1-sdhci.h b/include/dt-bindings/mmc/spacemit-k1-sdhci.h +new file mode 100644 +index 000000000000..44b1e3f0ec03 +--- /dev/null ++++ b/include/dt-bindings/mmc/spacemit-k1-sdhci.h +@@ -0,0 +1,62 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * SDH driver for SPACEMIT K1 SDCHI ++ * Copyright (C) 2023 Spacemit ++ */ ++ ++#ifndef K1_DT_BINDINGS_MMC_SDHCI_H ++#define K1_DT_BINDINGS_MMC_SDHCI_H ++ ++/* K1x specific flag */ ++ ++/* MMC Quirks 
*/ ++/* Controller has an unusable ADMA engine */ ++#define SDHCI_QUIRK_BROKEN_ADMA (1<<6) ++#define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3) ++/* Controller does not support HS200 */ ++#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6) ++/* Support SDH controller on FPGA */ ++#define SDHCI_QUIRK2_SUPPORT_PHY_BYPASS (1<<25) ++/* Disable scan card at probe phase */ ++#define SDHCI_QUIRK2_DISABLE_PROBE_CDSCAN (1<<26) ++/* Need to set IO capability by SOC part register */ ++#define SDHCI_QUIRK2_SET_AIB_MMC (1<<27) ++/* Controller not support phy module */ ++#define SDHCI_QUIRK2_BROKEN_PHY_MODULE (1<<28) ++/* Controller support encrypt module */ ++#define SDHCI_QUIRK2_SUPPORT_ENCRYPT (1<<29) ++/* Controller does not support SDR104 */ ++#define SDHCI_QUIRK2_BROKEN_SDR104 (1<<30) ++ ++/* Common flag */ ++/* Controller provides an incorrect timeout value for transfers */ ++#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) ++/* Controller has unreliable card detection */ ++#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) ++ ++/* Controller reports inverted write-protect state */ ++#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) ++ ++/* MMC caps */ ++#define MMC_CAP2_CRC_SW_RETRY (1 << 30) ++ ++ ++/* for SDIO */ ++ ++/* Needs polling for card-detection */ ++#define MMC_CAP_NEEDS_POLL (1 << 5) ++ ++/* for SD card */ ++ ++/* Host supports UHS SDR12 mode */ ++#define MMC_CAP_UHS_SDR12 (1 << 16) ++/* Host supports UHS SDR25 mode */ ++#define MMC_CAP_UHS_SDR25 (1 << 17) ++/* Host supports UHS SDR50 mode */ ++#define MMC_CAP_UHS_SDR50 (1 << 18) ++/* Host supports UHS SDR104 mode */ ++#define MMC_CAP_UHS_SDR104 (1 << 19) ++/* Host supports UHS DDR50 mode */ ++#define MMC_CAP_UHS_DDR50 (1 << 20) ++ ++#endif /* K1_DT_BINDINGS_MMC_SDHCI_H */ +diff --git a/include/dt-bindings/pinctrl/k1-x-pinctrl.h b/include/dt-bindings/pinctrl/k1-x-pinctrl.h +new file mode 100644 +index 000000000000..3d3cb59e8aa3 +--- /dev/null ++++ b/include/dt-bindings/pinctrl/k1-x-pinctrl.h +@@ -0,0 +1,198 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/*Copyright (C) 2023 Spacemit Co., Ltd. 
*/ ++#ifndef __DT_BINDINGS_K1X_PINCTRL_H ++#define __DT_BINDINGS_K1X_PINCTRL_H ++ ++/* pin offset */ ++#define PINID(x) ((x) + 1) ++ ++#define GPIO_00 PINID(0) ++#define GPIO_01 PINID(1) ++#define GPIO_02 PINID(2) ++#define GPIO_03 PINID(3) ++#define GPIO_04 PINID(4) ++#define GPIO_05 PINID(5) ++#define GPIO_06 PINID(6) ++#define GPIO_07 PINID(7) ++#define GPIO_08 PINID(8) ++#define GPIO_09 PINID(9) ++#define GPIO_10 PINID(10) ++#define GPIO_11 PINID(11) ++#define GPIO_12 PINID(12) ++#define GPIO_13 PINID(13) ++#define GPIO_14 PINID(14) ++#define GPIO_15 PINID(15) ++#define GPIO_16 PINID(16) ++#define GPIO_17 PINID(17) ++#define GPIO_18 PINID(18) ++#define GPIO_19 PINID(19) ++#define GPIO_20 PINID(20) ++#define GPIO_21 PINID(21) ++#define GPIO_22 PINID(22) ++#define GPIO_23 PINID(23) ++#define GPIO_24 PINID(24) ++#define GPIO_25 PINID(25) ++#define GPIO_26 PINID(26) ++#define GPIO_27 PINID(27) ++#define GPIO_28 PINID(28) ++#define GPIO_29 PINID(29) ++#define GPIO_30 PINID(30) ++#define GPIO_31 PINID(31) ++ ++#define GPIO_32 PINID(32) ++#define GPIO_33 PINID(33) ++#define GPIO_34 PINID(34) ++#define GPIO_35 PINID(35) ++#define GPIO_36 PINID(36) ++#define GPIO_37 PINID(37) ++#define GPIO_38 PINID(38) ++#define GPIO_39 PINID(39) ++#define GPIO_40 PINID(40) ++#define GPIO_41 PINID(41) ++#define GPIO_42 PINID(42) ++#define GPIO_43 PINID(43) ++#define GPIO_44 PINID(44) ++#define GPIO_45 PINID(45) ++#define GPIO_46 PINID(46) ++#define GPIO_47 PINID(47) ++#define GPIO_48 PINID(48) ++#define GPIO_49 PINID(49) ++#define GPIO_50 PINID(50) ++#define GPIO_51 PINID(51) ++#define GPIO_52 PINID(52) ++#define GPIO_53 PINID(53) ++#define GPIO_54 PINID(54) ++#define GPIO_55 PINID(55) ++#define GPIO_56 PINID(56) ++#define GPIO_57 PINID(57) ++#define GPIO_58 PINID(58) ++#define GPIO_59 PINID(59) ++#define GPIO_60 PINID(60) ++#define GPIO_61 PINID(61) ++#define GPIO_62 PINID(62) ++#define GPIO_63 PINID(63) ++ ++#define GPIO_64 PINID(64) ++#define GPIO_65 PINID(65) ++#define GPIO_66 PINID(66) ++#define GPIO_67 PINID(67) ++#define GPIO_68 PINID(68) ++#define GPIO_69 PINID(69) ++#define PRI_TDI PINID(70) ++#define PRI_TMS PINID(71) ++#define PRI_TCK PINID(72) ++#define PRI_TDO PINID(73) ++#define GPIO_74 PINID(74) ++#define GPIO_75 PINID(75) ++#define GPIO_76 PINID(76) ++#define GPIO_77 PINID(77) ++#define GPIO_78 PINID(78) ++#define GPIO_79 PINID(79) ++#define GPIO_80 PINID(80) ++#define GPIO_81 PINID(81) ++#define GPIO_82 PINID(82) ++#define GPIO_83 PINID(83) ++#define GPIO_84 PINID(84) ++#define GPIO_85 PINID(85) ++ ++#define QSPI_DAT0 PINID(89) ++#define QSPI_DAT1 PINID(90) ++#define QSPI_DAT2 PINID(91) ++#define QSPI_DAT3 PINID(92) ++#define QSPI_CSI PINID(93) ++#define QSPI_CLK PINID(94) ++ ++#define MMC1_DAT3 PINID(109) ++#define MMC1_DAT2 PINID(110) ++#define MMC1_DAT1 PINID(111) ++#define MMC1_DAT0 PINID(112) ++#define MMC1_CMD PINID(113) ++#define MMC1_CLK PINID(114) ++#define GPIO_110 PINID(115) ++#define PWR_SCL PINID(116) ++#define PWR_SDA PINID(117) ++#define VCXO_EN PINID(118) ++#define DVL0 PINID(119) ++#define DVL1 PINID(120) ++#define PMIC_INT_N PINID(121) ++#define GPIO_86 PINID(122) ++#define GPIO_87 PINID(123) ++#define GPIO_88 PINID(124) ++#define GPIO_89 PINID(125) ++#define GPIO_90 PINID(126) ++#define GPIO_91 PINID(127) ++#define GPIO_92 PINID(128) ++ ++#define GPIO_111 PINID(130) ++#define GPIO_112 PINID(131) ++#define GPIO_113 PINID(132) ++#define GPIO_114 PINID(133) ++#define GPIO_115 PINID(134) ++#define GPIO_116 PINID(135) ++#define GPIO_117 PINID(136) ++#define GPIO_118 PINID(137) 
++#define GPIO_119 PINID(138) ++#define GPIO_120 PINID(139) ++#define GPIO_121 PINID(140) ++#define GPIO_122 PINID(141) ++#define GPIO_123 PINID(142) ++#define GPIO_124 PINID(143) ++#define GPIO_125 PINID(144) ++#define GPIO_126 PINID(145) ++#define GPIO_127 PINID(146) ++ ++/* pin mux */ ++#define MUX_MODE0 0 ++#define MUX_MODE1 1 ++#define MUX_MODE2 2 ++#define MUX_MODE3 3 ++#define MUX_MODE4 4 ++#define MUX_MODE5 5 ++#define MUX_MODE6 6 ++#define MUX_MODE7 7 ++ ++/* strong pull resistor */ ++#define SPU_EN (1 << 3) ++ ++/* edge detect */ ++#define EDGE_NONE (1 << 6) ++#define EDGE_RISE (1 << 4) ++#define EDGE_FALL (1 << 5) ++#define EDGE_BOTH (3 << 4) ++ ++/* slew rate output control */ ++#define SLE_EN (1 << 7) ++ ++/* schmitter trigger input threshold */ ++#define ST00 (0 << 8) ++#define ST01 (1 << 8) ++#define ST02 (2 << 8) ++#define ST03 (3 << 8) ++ ++/* driver strength*/ ++#define PAD_1V8_DS0 (0 << 11) ++#define PAD_1V8_DS1 (1 << 11) ++#define PAD_1V8_DS2 (2 << 11) ++#define PAD_1V8_DS3 (3 << 11) ++ ++/* ++ * notice: !!! ++ * ds2 ---> bit10, ds1 ----> bit12, ds0 ----> bit11 ++ */ ++#define PAD_3V_DS0 (0 << 10) /* bit[12:10] 000 */ ++#define PAD_3V_DS1 (2 << 10) /* bit[12:10] 010 */ ++#define PAD_3V_DS2 (4 << 10) /* bit[12:10] 100 */ ++#define PAD_3V_DS3 (6 << 10) /* bit[12:10] 110 */ ++#define PAD_3V_DS4 (1 << 10) /* bit[12:10] 001 */ ++#define PAD_3V_DS5 (3 << 10) /* bit[12:10] 011 */ ++#define PAD_3V_DS6 (5 << 10) /* bit[12:10] 101 */ ++#define PAD_3V_DS7 (7 << 10) /* bit[12:10] 111 */ ++ ++/* pull up/down */ ++#define PULL_DIS (0 << 13) /* bit[15:13] 000 */ ++#define PULL_UP (6 << 13) /* bit[15:13] 110 */ ++#define PULL_DOWN (5 << 13) /* bit[15:13] 101 */ ++ ++#define K1X_PADCONF(pinid, conf, mux) ((pinid) * 4) (conf) (mux) ++ ++#endif /* __DT_BINDINGS_K1PRO_PINCTRL_H */ +diff --git a/include/dt-bindings/pinctrl/ur-dp1000-pinctrl.h b/include/dt-bindings/pinctrl/ur-dp1000-pinctrl.h +new file mode 100644 +index 000000000000..c4f2e889f8f7 +--- /dev/null ++++ b/include/dt-bindings/pinctrl/ur-dp1000-pinctrl.h +@@ -0,0 +1,64 @@ ++/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ ++/* UltraRISC DP1000 pinctrl header ++ * ++ * Copyright(C) 2025 UltraRISC Technology Co., Ltd. 
++ * ++ * Author: wangjia ++ */ ++ ++#ifndef __UR_DP1000_PINCTRL_H__ ++#define __UR_DP1000_PINCTRL_H__ ++ ++/** ++ * UltraRISC DP1000 IO pad configuration ++ * port: 'A' 'B' 'C' ++ * Pin in the port ++ * pin: ++ * PA: 0 - 15 ++ * PB-PD: 0 - 7 ++ * func: ++ * UR_FUNC_DEF: default ++ * UR_FUNC0: func0 ++ * UR_FUNC1: func1 ++ */ ++#define UR_DP1000_IOMUX_A 0x0 ++#define UR_DP1000_IOMUX_B 0x1 ++#define UR_DP1000_IOMUX_C 0x2 ++#define UR_DP1000_IOMUX_D 0x3 ++#define UR_DP1000_IOMUX_LPC 0x4 ++ ++#define UR_FUNC_DEF 0 ++#define UR_FUNC0 1 ++#define UR_FUNC1 0x10000 ++ ++/** ++ * Configure pull up/down resistor of the IO pin ++ * UR_PULL_DIS: disable pull-up and pull-down ++ * UR_PULL_UP: enable pull-up ++ * UR_PULL_DOWN: enable pull-down ++ */ ++#define UR_PULL_DIS 0 ++#define UR_PULL_UP 1 ++#define UR_PULL_DOWN 2 ++/** ++ * Configure drive strength of the IO pin ++ * UR_DRIVE_DEF: default value, reset value is 2 ++ * UR_DRIVE_0: 20mA ++ * UR_DRIVE_1: 27mA ++ * UR_DIRVE_2: 33mA ++ * UR_DRIVE_3: 40mA ++ */ ++#define UR_DRIVE_DEF 2 ++#define UR_DRIVE_0 0 ++#define UR_DRIVE_1 1 ++#define UR_DRIVE_2 2 ++#define UR_DRIVE_3 3 ++ ++/** ++ * Combine the pull-up/down resistor and drive strength ++ * pull: UR_PULL_DIS, UR_PULL_UP, UR_PULL_DOWN ++ * drive: UR_DRIVE_DEF, UR_DRIVE_0, UR_DRIVE_1, UR_DRIVE_2, UR_DRIVE_3 ++ */ ++#define UR_DP1000_BIAS(pull, drive) (((pull)<<2) + (drive)) ++ ++#endif diff --git a/include/dt-bindings/reset/sophgo-mango-resets.h b/include/dt-bindings/reset/sophgo-mango-resets.h new file mode 100644 index 000000000000..9ff8ca4c3d67 @@ -561693,6 +614554,138 @@ index 000000000000..9ff8ca4c3d67 +#define RST_MAX_NUM (RST_RXU31+1) + +#endif +diff --git a/include/dt-bindings/reset/spacemit-k1x-reset.h b/include/dt-bindings/reset/spacemit-k1x-reset.h +new file mode 100644 +index 000000000000..dc5779b818fe +--- /dev/null ++++ b/include/dt-bindings/reset/spacemit-k1x-reset.h +@@ -0,0 +1,126 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. 
*/ ++ ++#ifndef __DT_BINDINGS_RESET_SAPCEMIT_K1X_H__ ++#define __DT_BINDINGS_RESET_SAPCEMIT_K1X_H__ ++ ++#define RESET_UART1 1 ++#define RESET_UART2 2 ++#define RESET_GPIO 3 ++#define RESET_PWM0 4 ++#define RESET_PWM1 5 ++#define RESET_PWM2 6 ++#define RESET_PWM3 7 ++#define RESET_PWM4 8 ++#define RESET_PWM5 9 ++#define RESET_PWM6 10 ++#define RESET_PWM7 11 ++#define RESET_PWM8 12 ++#define RESET_PWM9 13 ++#define RESET_PWM10 14 ++#define RESET_PWM11 15 ++#define RESET_PWM12 16 ++#define RESET_PWM13 17 ++#define RESET_PWM14 18 ++#define RESET_PWM15 19 ++#define RESET_PWM16 20 ++#define RESET_PWM17 21 ++#define RESET_PWM18 22 ++#define RESET_PWM19 23 ++#define RESET_SSP3 24 ++#define RESET_UART3 25 ++#define RESET_RTC 26 ++#define RESET_TWSI0 27 ++#define RESET_TIMERS1 28 ++#define RESET_AIB 29 ++#define RESET_TIMERS2 30 ++#define RESET_ONEWIRE 31 ++#define RESET_SSPA0 32 ++#define RESET_SSPA1 33 ++#define RESET_DRO 34 ++#define RESET_IR 35 ++#define RESET_TWSI1 36 ++#define RESET_TSEN 37 ++#define RESET_TWSI2 38 ++#define RESET_TWSI4 39 ++#define RESET_TWSI5 40 ++#define RESET_TWSI6 41 ++#define RESET_TWSI7 42 ++#define RESET_TWSI8 43 ++#define RESET_IPC_AP2AUD 44 ++#define RESET_UART4 45 ++#define RESET_UART5 46 ++#define RESET_UART6 47 ++#define RESET_UART7 48 ++#define RESET_UART8 49 ++#define RESET_UART9 50 ++#define RESET_CAN0 51 ++#define RESET_WDT 52 ++#define RESET_JPG 53 ++#define RESET_CSI 54 ++#define RESET_CCIC2_PHY 55 ++#define RESET_CCIC3_PHY 56 ++#define RESET_ISP 57 ++#define RESET_ISP_AHB 58 ++#define RESET_ISP_CI 59 ++#define RESET_ISP_CPP 60 ++#define RESET_LCD 61 ++#define RESET_DSI_ESC 62 ++#define RESET_V2D 63 ++#define RESET_MIPI 64 ++#define RESET_LCD_SPI 65 ++#define RESET_LCD_SPI_BUS 66 ++#define RESET_LCD_SPI_HBUS 67 ++#define RESET_LCD_MCLK 68 ++#define RESET_CCIC_4X 69 ++#define RESET_CCIC1_PHY 70 ++#define RESET_SDH_AXI 71 ++#define RESET_SDH0 72 ++#define RESET_SDH1 73 ++#define RESET_USB_AXI 74 ++#define RESET_USBP1_AXI 75 ++#define RESET_USB3_0 76 ++#define RESET_QSPI 77 ++#define RESET_QSPI_BUS 78 ++#define RESET_DMA 79 ++#define RESET_AES 80 ++#define RESET_VPU 81 ++#define RESET_GPU 82 ++#define RESET_SDH2 83 ++#define RESET_MC 84 ++#define RESET_EM_AXI 85 ++#define RESET_EM 86 ++#define RESET_AUDIO_SYS 87 ++#define RESET_HDMI 88 ++#define RESET_PCIE0 89 ++#define RESET_PCIE1 90 ++#define RESET_PCIE2 91 ++#define RESET_EMAC0 92 ++#define RESET_EMAC1 93 ++#define RESET_SEC_UART1 94 ++#define RESET_SEC_SSP2 95 ++#define RESET_SEC_TWSI3 96 ++#define RESET_SEC_RTC 97 ++#define RESET_SEC_TIMERS0 98 ++#define RESET_SEC_KPC 99 ++#define RESET_SEC_GPIO 100 ++#define RESET_RCPU_HDMIAUDIO 101 ++#define RESET_RCPU_CAN 102 ++#define RESET_RCPU_I2C0 103 ++#define RESET_RCPU_SSP0 104 ++#define RESET_RCPU_IR 105 ++#define RESET_RCPU_UART0 106 ++#define RESET_RCPU_UART1 107 ++#define RESET_RCPU2_PWM0 108 ++#define RESET_RCPU2_PWM1 109 ++#define RESET_RCPU2_PWM2 110 ++#define RESET_RCPU2_PWM3 111 ++#define RESET_RCPU2_PWM4 112 ++#define RESET_RCPU2_PWM5 113 ++#define RESET_RCPU2_PWM6 114 ++#define RESET_RCPU2_PWM7 115 ++#define RESET_RCPU2_PWM8 116 ++#define RESET_RCPU2_PWM9 117 ++#define RESET_NUMBER 118 ++ ++#endif /* __DT_BINDINGS_RESET_SAPCEMIT_K1X_H__ */ diff --git a/include/dt-bindings/reset/xuantie,th1520-reset.h b/include/dt-bindings/reset/xuantie,th1520-reset.h new file mode 100644 index 000000000000..44a4581cc229 @@ -561818,14 +614811,84 @@ index 000000000000..c0370797443f +#define IOPMP_AUDIO1 29 + +#endif /* __DT_XUANTIE_TH1520_IOPMP_H__ */ +diff --git 
a/include/linux/acpi.h b/include/linux/acpi.h +index 83bb76c7d5a1..f3aa975327b6 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -98,6 +98,7 @@ enum acpi_irq_model_id { + #ifdef CONFIG_SW64 + ACPI_IRQ_MODEL_SWPIC, + #endif ++ ACPI_IRQ_MODEL_RINTC, + ACPI_IRQ_MODEL_COUNT + }; + +@@ -291,6 +292,12 @@ acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } + + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); + ++#ifdef CONFIG_RISCV ++void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa); ++#else ++static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { } ++#endif ++ + #ifndef PHYS_CPUID_INVALID + typedef u32 phys_cpuid_t; + #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) +@@ -1348,6 +1355,8 @@ struct acpi_probe_entry { + kernel_ulong_t driver_data; + }; + ++void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr); ++ + #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ + valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ +@@ -1551,6 +1560,12 @@ void acpi_arm_init(void); + static inline void acpi_arm_init(void) { } + #endif + ++#ifdef CONFIG_RISCV ++void acpi_riscv_init(void); ++#else ++static inline void acpi_riscv_init(void) { } ++#endif ++ + #ifdef CONFIG_ACPI_PCC + void acpi_init_pcc(void); + #else +diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h +index a5e397fe05a8..62823f1f12ae 100644 +--- a/include/linux/acpi_iort.h ++++ b/include/linux/acpi_iort.h +@@ -39,7 +39,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, + void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, + struct list_head *head); + /* IOMMU interface */ +-int iort_dma_get_ranges(struct device *dev, u64 *size); ++int iort_dma_get_ranges(struct device *dev, u64 *limit); + int iort_iommu_configure_id(struct device *dev, const u32 *id_in); + void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head); + phys_addr_t acpi_iort_dma_get_max_cpu_address(void); +@@ -56,7 +56,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *hea + static inline + void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { } + /* IOMMU interface */ +-static inline int iort_dma_get_ranges(struct device *dev, u64 *size) ++static inline int iort_dma_get_ranges(struct device *dev, u64 *limit) + { return -ENODEV; } + static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) + { return -ENODEV; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h -index 8f3b474f3a70..61c981c1ec9d 100644 +index 8f3b474f3a70..5fc138b773c3 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h -@@ -158,6 +158,8 @@ enum cpuhp_state { +@@ -158,6 +158,9 @@ enum cpuhp_state { CPUHP_AP_IRQ_AVECINTC_STARTING, #endif CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, ++ CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, + CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, + CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, @@ -561859,6 +614922,57 @@ index 4c26ede3b87d..b2da3c2837dd 100644 /** * cpumask_last - get the last CPU in a cpumask * @srcp: - the cpumask pointer +diff --git a/include/linux/crc32.h b/include/linux/crc32.h +index 9e8a032c1788..87f788c0d607 100644 +--- a/include/linux/crc32.h ++++ b/include/linux/crc32.h +@@ -9,7 +9,9 @@ + #include + + u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); ++u32 __pure crc32_le_base(u32 crc, unsigned char const *p, 
size_t len); + u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); ++u32 __pure crc32_be_base(u32 crc, unsigned char const *p, size_t len); + + /** + * crc32_le_combine - Combine two crc32 check values into one. For two +@@ -37,6 +39,7 @@ static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) + } + + u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len); ++u32 __pure __crc32c_le_base(u32 crc, unsigned char const *p, size_t len); + + /** + * __crc32c_le_combine - Combine two crc32c check values into one. For two +diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h +index 8589b0dafdb0..e17cbc424870 100644 +--- a/include/linux/dma-direct.h ++++ b/include/linux/dma-direct.h +@@ -49,6 +49,24 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev, + return (phys_addr_t)-1; + } + ++static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map) ++{ ++ dma_addr_t ret = (dma_addr_t)U64_MAX; ++ ++ for (; map->size; map++) ++ ret = min(ret, map->dma_start); ++ return ret; ++} ++ ++static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map) ++{ ++ dma_addr_t ret = 0; ++ ++ for (; map->size; map++) ++ ret = max(ret, map->dma_start + map->size - 1); ++ return ret; ++} ++ + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA + #include + #ifndef phys_to_dma_unencrypted diff --git a/include/linux/find.h b/include/linux/find.h index 5e4f39ef2e72..a3a908a27df7 100644 --- a/include/linux/find.h @@ -562118,6 +615232,28 @@ index 000000000000..b88ae8727c29 +#endif + +#endif +diff --git a/include/linux/iommu.h b/include/linux/iommu.h +index 0dea4c637fd2..afd79ee6363a 100644 +--- a/include/linux/iommu.h ++++ b/include/linux/iommu.h +@@ -696,7 +696,7 @@ struct iommu_ops { + /* Request/Free a list of reserved regions for a device */ + void (*get_resv_regions)(struct device *dev, struct list_head *list); + +- int (*of_xlate)(struct device *dev, struct of_phandle_args *args); ++ int (*of_xlate)(struct device *dev, const struct of_phandle_args *args); + bool (*is_attach_deferred)(struct device *dev); + + /* Per device IOMMU features */ +@@ -1242,7 +1242,7 @@ struct iommu_mm_data { + int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, + const struct iommu_ops *ops); + void iommu_fwspec_free(struct device *dev); +-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); ++int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids); + const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); + + static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h new file mode 100644 index 000000000000..ec8f7df50583 @@ -562271,10 +615407,10 @@ index 000000000000..ec8f7df50583 +#endif diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h new file mode 100644 -index 000000000000..faf0b800b1b0 +index 000000000000..7494952c5518 --- /dev/null +++ b/include/linux/irqchip/riscv-imsic.h -@@ -0,0 +1,87 @@ +@@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
@@ -562285,6 +615421,8 @@ index 000000000000..faf0b800b1b0 + +#include +#include ++#include ++#include +#include + +#define IMSIC_MMIO_PAGE_SHIFT 12 @@ -562361,6 +615499,13 @@ index 000000000000..faf0b800b1b0 + +#endif + ++#ifdef CONFIG_ACPI ++int imsic_platform_acpi_probe(struct fwnode_handle *fwnode); ++struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev); ++#else ++static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; } ++#endif ++ +#endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 8594cd9b642e..2992c1851b63 100644 @@ -562403,6 +615548,262 @@ index c29921fd8cd1..5c1fe6f1fcde 100644 }; #endif /* _LINUX_IRQDOMAIN_DEFS_H */ +diff --git a/include/linux/mfd/spacemit_p1.h b/include/linux/mfd/spacemit_p1.h +new file mode 100644 +index 000000000000..52614b8dca58 +--- /dev/null ++++ b/include/linux/mfd/spacemit_p1.h +@@ -0,0 +1,250 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Spacemit P1 multi-function-device interface ++ * Copyright (c) 2023, SPACEMIT Co., Ltd ++ */ ++#ifndef __SPACEMIT_P1_H__ ++#define __SPACEMIT_P1_H__ ++ ++#include ++#include ++ ++enum SPM_P1_reg { ++ SPM_P1_ID_DCDC1, ++ SPM_P1_ID_DCDC2, ++ SPM_P1_ID_DCDC3, ++ SPM_P1_ID_DCDC4, ++ SPM_P1_ID_DCDC5, ++ SPM_P1_ID_DCDC6, ++ SPM_P1_ID_LDO1, ++ SPM_P1_ID_LDO2, ++ SPM_P1_ID_LDO3, ++ SPM_P1_ID_LDO4, ++ SPM_P1_ID_LDO5, ++ SPM_P1_ID_LDO6, ++ SPM_P1_ID_LDO7, ++ SPM_P1_ID_LDO8, ++ SPM_P1_ID_LDO9, ++ SPM_P1_ID_LDO10, ++ SPM_P1_ID_LDO11, ++ SPM_P1_ID_SWITCH1, ++}; ++ ++/* irq description */ ++enum IRQ_line { ++ /* reg: 0x91 */ ++ SPM_P1_E_GPI0, ++ SPM_P1_E_GPI1, ++ SPM_P1_E_GPI2, ++ SPM_P1_E_GPI3, ++ SPM_P1_E_GPI4, ++ SPM_P1_E_GPI5, ++ ++ /* reg: 0x92 */ ++ SPM_P1_E_ADC_TEMP, ++ SPM_P1_E_ADC_EOC, ++ SPM_P1_E_ADC_EOS, ++ SPM_P1_E_WDT_TO, ++ SPM_P1_E_ALARM, ++ SPM_P1_E_TICK, ++ ++ /* reg: 0x93 */ ++ SPM_P1_E_LDO_OV, ++ SPM_P1_E_LDO_UV, ++ SPM_P1_E_LDO_SC, ++ SPM_P1_E_SW_SC, ++ SPM_P1_E_TEMP_WARN, ++ SPM_P1_E_TEMP_SEVERE, ++ SPM_P1_E_TEMP_CRIT, ++ ++ /* reg: 0x94 */ ++ SPM_P1_E_BUCK1_OV, ++ SPM_P1_E_BUCK2_OV, ++ SPM_P1_E_BUCK3_OV, ++ SPM_P1_E_BUCK4_OV, ++ SPM_P1_E_BUCK5_OV, ++ SPM_P1_E_BUCK6_OV, ++ ++ /* reg: 0x95 */ ++ SPM_P1_E_BUCK1_UV, ++ SPM_P1_E_BUCK2_UV, ++ SPM_P1_E_BUCK3_UV, ++ SPM_P1_E_BUCK4_UV, ++ SPM_P1_E_BUCK5_UV, ++ SPM_P1_E_BUCK6_UV, ++ ++ /* reg: 0x96 */ ++ SPM_P1_E_BUCK1_SC, ++ SPM_P1_E_BUCK2_SC, ++ SPM_P1_E_BUCK3_SC, ++ SPM_P1_E_BUCK4_SC, ++ SPM_P1_E_BUCK5_SC, ++ SPM_P1_E_BUCK6_SC, ++ ++ /* reg: 0x97 */ ++ SPM_P1_E_PWRON_RINTR, ++ SPM_P1_E_PWRON_FINTR, ++ SPM_P1_E_PWRON_SINTR, ++ SPM_P1_E_PWRON_LINTR, ++ SPM_P1_E_PWRON_SDINTR, ++ SPM_P1_E_VSYS_OV, ++}; ++ ++#define SPM_P1_MAX_REG 0xB0 ++ ++#define SPM_P1_VERSION_ID_REG 0xa1 ++ ++#define SPM_P1_BUCK_VSEL_MASK 0xff ++#define SMP8821_BUCK_EN_MASK 0x1 ++ ++#define SPM_P1_BUCK1_CTRL_REG 0x47 ++#define SPM_P1_BUCK2_CTRL_REG 0x4a ++#define SPM_P1_BUCK3_CTRL_REG 0x4d ++#define SPM_P1_BUCK4_CTRL_REG 0x50 ++#define SPM_P1_BUCK5_CTRL_REG 0x53 ++#define SPM_P1_BUCK6_CTRL_REG 0x56 ++ ++#define SPM_P1_BUCK1_VSEL_REG 0x48 ++#define SPM_P1_BUCK2_VSEL_REG 0x4b ++#define SPM_P1_BUCK3_VSEL_REG 0x4e ++#define SPM_P1_BUCK4_VSEL_REG 0x51 ++#define SPM_P1_BUCK5_VSEL_REG 0x54 ++#define SPM_P1_BUCK6_VSEL_REG 0x57 ++ ++#define SPM_P1_ALDO1_CTRL_REG 0x5b ++#define SPM_P1_ALDO2_CTRL_REG 0x5e ++#define SPM_P1_ALDO3_CTRL_REG 0x61 ++#define SPM_P1_ALDO4_CTRL_REG 0x64 ++ ++#define SPM_P1_ALDO1_VOLT_REG 0x5c ++#define SPM_P1_ALDO2_VOLT_REG 0x5f ++#define SPM_P1_ALDO3_VOLT_REG 0x62 ++#define SPM_P1_ALDO4_VOLT_REG 
0x65 ++ ++#define SPM_P1_ALDO_EN_MASK 0x1 ++#define SPM_P1_ALDO_VSEL_MASK 0x7f ++ ++#define SPM_P1_DLDO1_CTRL_REG 0x67 ++#define SPM_P1_DLDO2_CTRL_REG 0x6a ++#define SPM_P1_DLDO3_CTRL_REG 0x6d ++#define SPM_P1_DLDO4_CTRL_REG 0x70 ++#define SPM_P1_DLDO5_CTRL_REG 0x73 ++#define SPM_P1_DLDO6_CTRL_REG 0x76 ++#define SPM_P1_DLDO7_CTRL_REG 0x79 ++ ++#define SPM_P1_DLDO1_VOLT_REG 0x68 ++#define SPM_P1_DLDO2_VOLT_REG 0x6b ++#define SPM_P1_DLDO3_VOLT_REG 0x6e ++#define SPM_P1_DLDO4_VOLT_REG 0x71 ++#define SPM_P1_DLDO5_VOLT_REG 0x74 ++#define SPM_P1_DLDO6_VOLT_REG 0x77 ++#define SPM_P1_DLDO7_VOLT_REG 0x7a ++ ++#define SPM_P1_DLDO_EN_MASK 0x1 ++#define SPM_P1_DLDO_VSEL_MASK 0x7f ++ ++#define SPM_P1_SWITCH_CTRL_REG 0x59 ++#define SPM_P1_SWTICH_EN_MASK 0x1 ++ ++#define SPM_P1_PWR_CTRL2 0x7e ++#define SPM_P1_SW_SHUTDOWN_BIT_MSK 0x4 ++#define SPM_P1_SW_RESET_BIT_MSK 0x2 ++ ++#define SPM_P1_NON_RESET_REG 0xAB ++#define SPM_P1_RESTART_CFG_BIT_MSK 0x7 ++ ++#define SPM_P1_SLEEP_REG_OFFSET 0x1 ++ ++#define SPM_P1_ADC_AUTO_REG 0x22 ++#define SPM_P1_ADC_AUTO_BIT_MSK 0x7f ++ ++#define SPM_P1_ADC_CTRL_REG 0x1e ++#define SPM_P1_ADC_CTRL_BIT_MSK 0x3 ++#define SPM_P1_ADC_CTRL_EN_BIT_OFFSET 0x0 ++#define SPM_P1_ADC_CTRL_GO_BIT_OFFSET 0x1 ++ ++#define SPM_P1_ADC_CFG1_REG 0x20 ++ ++#define SPM_P1_ADC_CFG1_ADC_CHOP_EN_BIT_OFFSET 0x6 ++#define SPM_P1_ADC_CFG1_ADC_CHOP_EN_BIT_MSK 0x40 ++ ++#define SPM_P1_ADC_CFG1_ADC_CHNNL_SEL_BIT_OFFSET 0x3 ++#define SPM_P1_ADC_CFG1_ADC_CHNNL_SEL_BIT_MSK 0x38 ++ ++#define SPM_P1_ADC_CFG2_REG 0x21 ++#define SPM_P1_ADC_CFG2_REF_SEL_BIT_OFFSET 0x0 ++#define SPM_P1_ADC_CFG2_REF_SEL_BIT_MSK 0x3 ++#define SPM_P1_ADC_CFG2_3V3_REF 0x2 ++ ++#define SPM_P1_ADC_CFG2_7_DEB_NUM 0x7 ++#define SPM_P1_ADC_CFG2_DEB_NUM_BIT_MSK 0x70 ++#define SPM_P1_ADC_CFG2_DEB_NUM_BIT_OFFSET 0x4 ++ ++#define SPM_P1_ADC_EXTERNAL_CHANNEL_OFFSET 2 ++ ++#define SPM_P1_ADCIN0_RES_H_REG 0x2a ++#define SPM_P1_ADCIN0_RES_L_REG 0x2b ++#define SPM_P1_ADCIN0_REG_L_BIT_MSK 0xf0 ++ ++#define SPM_P1_E_GPI0_MSK BIT(0) ++#define SPM_P1_E_GPI1_MSK BIT(1) ++#define SPM_P1_E_GPI2_MSK BIT(2) ++#define SPM_P1_E_GPI3_MSK BIT(3) ++#define SPM_P1_E_GPI4_MSK BIT(4) ++#define SPM_P1_E_GPI5_MSK BIT(5) ++ ++#define SPM_P1_E_ADC_TEMP_MSK BIT(0) ++#define SPM_P1_E_ADC_EOC_MSK BIT(1) ++#define SPM_P1_E_ADC_EOS_MSK BIT(2) ++#define SPM_P1_E_WDT_TO_MSK BIT(3) ++#define SPM_P1_E_ALARM_MSK BIT(4) ++#define SPM_P1_E_TICK_MSK BIT(5) ++ ++#define SPM_P1_E_LDO_OV_MSK BIT(0) ++#define SPM_P1_E_LDO_UV_MSK BIT(1) ++#define SPM_P1_E_LDO_SC_MSK BIT(2) ++#define SPM_P1_E_SW_SC_MSK BIT(3) ++#define SPM_P1_E_TEMP_WARN_MSK BIT(4) ++#define SPM_P1_E_TEMP_SEVERE_MSK BIT(5) ++#define SPM_P1_E_TEMP_CRIT_MSK BIT(6) ++ ++#define SPM_P1_E_BUCK1_OV_MSK BIT(0) ++#define SPM_P1_E_BUCK2_OV_MSK BIT(1) ++#define SPM_P1_E_BUCK3_OV_MSK BIT(2) ++#define SPM_P1_E_BUCK4_OV_MSK BIT(3) ++#define SPM_P1_E_BUCK5_OV_MSK BIT(4) ++#define SPM_P1_E_BUCK6_OV_MSK BIT(5) ++ ++#define SPM_P1_E_BUCK1_UV_MSK BIT(0) ++#define SPM_P1_E_BUCK2_UV_MSK BIT(1) ++#define SPM_P1_E_BUCK3_UV_MSK BIT(2) ++#define SPM_P1_E_BUCK4_UV_MSK BIT(3) ++#define SPM_P1_E_BUCK5_UV_MSK BIT(4) ++#define SPM_P1_E_BUCK6_UV_MSK BIT(5) ++ ++#define SPM_P1_E_BUCK1_SC_MSK BIT(0) ++#define SPM_P1_E_BUCK2_SC_MSK BIT(1) ++#define SPM_P1_E_BUCK3_SC_MSK BIT(2) ++#define SPM_P1_E_BUCK4_SC_MSK BIT(3) ++#define SPM_P1_E_BUCK5_SC_MSK BIT(4) ++#define SPM_P1_E_BUCK6_SC_MSK BIT(5) ++ ++#define SPM_P1_E_PWRON_RINTR_MSK BIT(0) ++#define SPM_P1_E_PWRON_FINTR_MSK BIT(1) ++#define SPM_P1_E_PWRON_SINTR_MSK BIT(2) ++#define 
SPM_P1_E_PWRON_LINTR_MSK BIT(3) ++#define SPM_P1_E_PWRON_SDINTR_MSK BIT(4) ++#define SPM_P1_E_VSYS_OV_MSK BIT(5) ++ ++#define SPM_P1_E_STATUS_REG_BASE 0x91 ++#define SPM_P1_E_EN_REG_BASE 0x98 ++ ++struct spacemit_pmic { ++ struct i2c_client *i2c; ++ struct regmap_irq_chip_data *irq_data; ++ struct regmap *regmap; ++ const struct regmap_config *regmap_cfg; ++ const struct regmap_irq_chip *regmap_irq_chip; ++}; ++ ++#endif /* __SPACEMIT_P1_H__ */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 27f42f713c89..7617930d3157 100644 --- a/include/linux/mlx4/device.h @@ -562417,7 +615818,7 @@ index 27f42f713c89..7617930d3157 100644 #define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ (dev_cap).num_ports * MIN_MSIX_P_PORT) diff --git a/include/linux/mm.h b/include/linux/mm.h -index c127b74f2c90..53c09345618a 100644 +index 1f36bf9ee02f..9f1207c2ec33 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3182,6 +3182,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) @@ -562531,6 +615932,178 @@ index 5fd8a6caae98..b0ac7a49b529 100644 bool msi_device_has_isolated_msi(struct device *dev); #else /* CONFIG_GENERIC_MSI_IRQ */ +diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h +index ac473d00e9a1..88a4dc3d1f46 100644 +--- a/include/linux/pci-ecam.h ++++ b/include/linux/pci-ecam.h +@@ -89,6 +89,7 @@ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ + extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ + extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */ + extern const struct pci_ecam_ops sunway_pci_ecam_ops; /* Sunway PCIe */ ++extern const struct pci_ecam_ops sophgo_pci_ecam_ops; /* Sophgo PCIe */ + #endif + + #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) +diff --git a/include/linux/platform_data/spacemit_k1_sdhci.h b/include/linux/platform_data/spacemit_k1_sdhci.h +new file mode 100644 +index 000000000000..9f25a53010df +--- /dev/null ++++ b/include/linux/platform_data/spacemit_k1_sdhci.h +@@ -0,0 +1,99 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2023 Spacemit ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#ifndef _SPACEMIT_K1_SDHCI_H_ ++#define _SPACEMIT_K1_SDHCI_H_ ++ ++#define CANDIDATE_WIN_NUM 3 ++#define SELECT_DELAY_NUM 9 ++#define WINDOW_1ST 0 ++#define WINDOW_2ND 1 ++#define WINDOW_3RD 2 ++ ++#define RX_TUNING_WINDOW_THRESHOLD 80 ++#define RX_TUNING_DLINE_REG 0x09 ++#define TX_TUNING_DLINE_REG 0x00 ++#define TX_TUNING_DELAYCODE 127 ++ ++enum window_type { ++ LEFT_WINDOW = 0, ++ MIDDLE_WINDOW = 1, ++ RIGHT_WINDOW = 2, ++}; ++ ++struct tuning_window { ++ u8 type; ++ u8 min_delay; ++ u8 max_delay; ++}; ++ ++struct rx_tuning { ++ u8 rx_dline_reg; ++ u8 select_delay_num; ++ u8 current_delay_index; ++ /* 0: biggest window, 1: bigger, 2: small */ ++ struct tuning_window windows[CANDIDATE_WIN_NUM]; ++ u8 select_delay[SELECT_DELAY_NUM]; ++ ++ u32 card_cid[4]; ++ u8 window_limit; ++ u8 tuning_fail; ++ u8 window_type; ++}; ++ ++/* ++ * struct k1_sdhci_platdata() - Platform device data for Spacemit K1 SDHCI ++ * @flags: flags for platform requirement ++ * @host_caps: Standard MMC host capabilities bit field ++ * @host_caps2: Standard MMC host capabilities bit field ++ * @host_caps_disable: Aquila MMC host capabilities disable bit field ++ * @host_caps2_disable: Aquila MMC host capabilities disable bit field ++ * @quirks: quirks of platform ++ * @quirks2: quirks2 of platform ++ * @pm_caps: pm_caps of platform ++ */ ++struct k1_sdhci_platdata { ++ u32 host_freq; ++ u32 flags; ++ u32 host_caps; ++ u32 host_caps2; ++ u32 host_caps_disable; ++ u32 host_caps2_disable; ++ u32 quirks; ++ u32 quirks2; ++ u32 pm_caps; ++ ++ u32 aib_mmc1_io_reg; ++ u32 apbc_asfar_reg; ++ u32 apbc_assar_reg; ++ ++ u8 tx_dline_reg; ++ u8 tx_delaycode; ++ u8 tx_delaycode_cnt; ++ u32 tx_delaycode_array[2]; ++ bool tx_need_update; ++ wait_queue_head_t wait_queue; ++ atomic_t ref_count; ++ u8 phy_driver_sel; ++ struct rx_tuning rxtuning; ++ u8 need_reset_dllcfg1; ++ u32 prev_dllcfg1; ++ u32 curr_dllcfg1; ++ u32 new_dllcfg1; ++ u8 dllcfg1_odd_reset; ++ u32 rx_tuning_freq; ++}; ++ ++#endif /* _SPACEMIT_K1_SDHCI_H_ */ +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h +index 5136eade96a4..052df85dfd59 100644 +--- a/include/linux/serial_core.h ++++ b/include/linux/serial_core.h +@@ -978,6 +978,8 @@ int uart_register_driver(struct uart_driver *uart); + void uart_unregister_driver(struct uart_driver *uart); + int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); + void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port); ++int uart_read_port_properties(struct uart_port *port); ++int uart_read_and_validate_port_properties(struct uart_port *port); + bool uart_match_port(const struct uart_port *port1, + const struct uart_port *port2); + +diff --git a/include/linux/sizes.h b/include/linux/sizes.h +index 84aa448d8bb3..c3a00b967d18 100644 +--- a/include/linux/sizes.h ++++ b/include/linux/sizes.h +@@ -47,8 +47,17 @@ + #define SZ_8G _AC(0x200000000, ULL) + #define SZ_16G _AC(0x400000000, ULL) + #define SZ_32G _AC(0x800000000, ULL) ++#define SZ_64G _AC(0x1000000000, ULL) ++#define SZ_128G _AC(0x2000000000, ULL) ++#define SZ_256G _AC(0x4000000000, ULL) ++#define SZ_512G _AC(0x8000000000, ULL) + + #define SZ_1T _AC(0x10000000000, ULL) ++#define SZ_2T _AC(0x20000000000, ULL) ++#define SZ_4T _AC(0x40000000000, ULL) ++#define SZ_8T _AC(0x80000000000, ULL) ++#define SZ_16T _AC(0x100000000000, ULL) ++#define SZ_32T _AC(0x200000000000, ULL) + #define SZ_64T _AC(0x400000000000, ULL) + + #endif /* __LINUX_SIZES_H__ */ +diff --git a/include/linux/string_choices.h 
b/include/linux/string_choices.h +index b03d0b126080..d656f0f60ec0 100644 +--- a/include/linux/string_choices.h ++++ b/include/linux/string_choices.h +@@ -53,4 +53,15 @@ static inline const char *str_true_false(bool v) + } + #define str_false_true(v) str_true_false(!(v)) + ++/** ++ * str_plural - Return the simple pluralization based on English counts ++ * @num: Number used for deciding pluralization ++ * ++ * If @num is 1, returns empty string, otherwise returns "s". ++ */ ++static inline const char *str_plural(size_t num) ++{ ++ return num == 1 ? "" : "s"; ++} ++ + #endif diff --git a/include/linux/sync_core.h b/include/linux/sync_core.h index 013da4b8b327..67bb9794b875 100644 --- a/include/linux/sync_core.h @@ -562966,10 +616539,10 @@ index 000000000000..cfb1f017480c + +#endif /* __VS_DRM_H__ */ diff --git a/init/Kconfig b/init/Kconfig -index 4c566c4bbfa4..0e63ee288041 100644 +index 2720083aaa17..29ad20cdb2dd 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -2158,6 +2158,9 @@ source "kernel/Kconfig.locks" +@@ -2257,6 +2257,9 @@ source "kernel/Kconfig.locks" config ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE bool @@ -563404,7 +616977,7 @@ index 5a4717a82024..915a4c2b1f65 100644 * msi_get_domain_info - Get the MSI interrupt domain info for @domain * @domain: The interrupt domain to retrieve data from diff --git a/kernel/panic.c b/kernel/panic.c -index ef9f9a4e928d..824220b20ad7 100644 +index b78d80596617..018efee81805 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -37,6 +37,7 @@ @@ -563430,10 +617003,10 @@ index ef9f9a4e928d..824220b20ad7 100644 /* * This thread may hit another WARN() in the panic path. diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 6a01cd02b579..615126e2eac2 100644 +index 47877f3b52f6..b8316cecdf81 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6663,7 +6663,9 @@ static void __sched notrace __schedule(unsigned int sched_mode) +@@ -6679,7 +6679,9 @@ static void __sched notrace __schedule(unsigned int sched_mode) * if (signal_pending_state()) if (p->state & @state) * * Also, the membarrier system call requires a full memory barrier @@ -563444,7 +617017,7 @@ index 6a01cd02b579..615126e2eac2 100644 */ rq_lock(rq, &rf); smp_mb__after_spinlock(); -@@ -6741,6 +6743,13 @@ static void __sched notrace __schedule(unsigned int sched_mode) +@@ -6757,6 +6759,13 @@ static void __sched notrace __schedule(unsigned int sched_mode) * architectures where spin_unlock is a full barrier, * - switch_to() for arm64 (weakly-ordered, spin_unlock * is a RELEASE barrier), @@ -563459,10 +617032,10 @@ index 6a01cd02b579..615126e2eac2 100644 ++*switch_count; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index c9a4ea466689..5755f9978166 100644 +index b22f3c072d20..baa23111b7e7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -10614,6 +10614,9 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) +@@ -10848,6 +10848,9 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) if (kthread_is_per_cpu(p)) return 0; @@ -563562,10 +617135,10 @@ index 32f99e9a670e..dacadd904250 100644 /* * Find the first cleared bit in a memory region. 
diff --git a/mm/memblock.c b/mm/memblock.c -index f84852a16ce4..4c9ff29245f4 100644 +index dca1c491dcdb..f2c535d90f87 100644 --- a/mm/memblock.c +++ b/mm/memblock.c -@@ -1818,6 +1818,7 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void) +@@ -1811,6 +1811,7 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void) static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) { @@ -563573,7 +617146,7 @@ index f84852a16ce4..4c9ff29245f4 100644 phys_addr_t max_addr = PHYS_ADDR_MAX; struct memblock_region *r; -@@ -1827,11 +1828,10 @@ static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) +@@ -1820,11 +1821,10 @@ static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) * of those regions, max_addr will keep original value PHYS_ADDR_MAX */ for_each_mem_region(r) { @@ -564200,7 +617773,7 @@ index a11cd7d6295f..03ecfa43bc3a 100644 return -EPIPE; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index 77ed507fc6e1..82e4d8d8801f 100644 +index cb8f46028159..da563420ba29 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -298,8 +298,7 @@ enum { @@ -564244,7 +617817,7 @@ index 8376fdb217ed..1b211fbb873b 100644 obj-$(CONFIG_SND_SOC) += xtensa/ +obj-$(CONFIG_SND_SOC) += xuantie/ diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig -index f1e1dbc509f6..fc394a65f681 100644 +index 6d105a23c828..68e7aa0479b4 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -54,6 +54,7 @@ config SND_SOC_ALL_CODECS @@ -575156,6 +628729,564 @@ index 000000000000..d89d90500cc7 + } + +] +diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile +index 2e75fb30f3a5..748e0c79a27d 100644 +--- a/tools/testing/selftests/hid/Makefile ++++ b/tools/testing/selftests/hid/Makefile +@@ -24,7 +24,9 @@ CXX ?= $(CROSS_COMPILE)g++ + + HOSTPKG_CONFIG := pkg-config + +-CFLAGS += -g -O0 -rdynamic -Wall -Werror -I$(KHDR_INCLUDES) -I$(OUTPUT) ++CFLAGS += -g -O0 -rdynamic -Wall -Werror -I$(OUTPUT) ++CFLAGS += -I$(OUTPUT)/tools/include ++ + LDLIBS += -lelf -lz -lrt -lpthread + + # Silence some warnings when compiled with clang +@@ -68,7 +70,6 @@ BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool + SCRATCH_DIR := $(OUTPUT)/tools + BUILD_DIR := $(SCRATCH_DIR)/build + INCLUDE_DIR := $(SCRATCH_DIR)/include +-KHDR_INCLUDES := $(SCRATCH_DIR)/uapi/include + BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a + ifneq ($(CROSS_COMPILE),) + HOST_BUILD_DIR := $(BUILD_DIR)/host +@@ -154,9 +155,6 @@ else + $(Q)cp "$(VMLINUX_H)" $@ + endif + +-$(KHDR_INCLUDES)/linux/hid.h: $(top_srcdir)/include/uapi/linux/hid.h +- $(MAKE) -C $(top_srcdir) INSTALL_HDR_PATH=$(SCRATCH_DIR)/uapi headers_install +- + $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \ + $(TOOLSDIR)/bpf/resolve_btfids/main.c \ + $(TOOLSDIR)/lib/rbtree.c \ +@@ -234,7 +232,7 @@ $(BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(OUTPUT) + $(Q)$(BPFTOOL) gen object $(<:.o=.linked1.o) $< + $(Q)$(BPFTOOL) gen skeleton $(<:.o=.linked1.o) name $(notdir $(<:.bpf.o=)) > $@ + +-$(OUTPUT)/%.o: %.c $(BPF_SKELS) $(KHDR_INCLUDES)/linux/hid.h ++$(OUTPUT)/%.o: %.c $(BPF_SKELS) + $(call msg,CC,,$@) + $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@ + +diff --git a/tools/testing/selftests/hid/progs/hid.c b/tools/testing/selftests/hid/progs/hid.c +index 88c593f753b5..1e558826b809 100644 +--- a/tools/testing/selftests/hid/progs/hid.c ++++ b/tools/testing/selftests/hid/progs/hid.c +@@ -1,8 +1,5 @@ + // SPDX-License-Identifier: GPL-2.0 + /* Copyright (c) 2022 Red hat */ +-#include 
"vmlinux.h" +-#include +-#include + #include "hid_bpf_helpers.h" + + char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h +index 4fff31dbe0e7..65e657ac1198 100644 +--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h ++++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h +@@ -5,6 +5,83 @@ + #ifndef __HID_BPF_HELPERS_H + #define __HID_BPF_HELPERS_H + ++/* "undefine" structs and enums in vmlinux.h, because we "override" them below */ ++#define hid_bpf_ctx hid_bpf_ctx___not_used ++#define hid_report_type hid_report_type___not_used ++#define hid_class_request hid_class_request___not_used ++#define hid_bpf_attach_flags hid_bpf_attach_flags___not_used ++#define HID_INPUT_REPORT HID_INPUT_REPORT___not_used ++#define HID_OUTPUT_REPORT HID_OUTPUT_REPORT___not_used ++#define HID_FEATURE_REPORT HID_FEATURE_REPORT___not_used ++#define HID_REPORT_TYPES HID_REPORT_TYPES___not_used ++#define HID_REQ_GET_REPORT HID_REQ_GET_REPORT___not_used ++#define HID_REQ_GET_IDLE HID_REQ_GET_IDLE___not_used ++#define HID_REQ_GET_PROTOCOL HID_REQ_GET_PROTOCOL___not_used ++#define HID_REQ_SET_REPORT HID_REQ_SET_REPORT___not_used ++#define HID_REQ_SET_IDLE HID_REQ_SET_IDLE___not_used ++#define HID_REQ_SET_PROTOCOL HID_REQ_SET_PROTOCOL___not_used ++#define HID_BPF_FLAG_NONE HID_BPF_FLAG_NONE___not_used ++#define HID_BPF_FLAG_INSERT_HEAD HID_BPF_FLAG_INSERT_HEAD___not_used ++#define HID_BPF_FLAG_MAX HID_BPF_FLAG_MAX___not_used ++ ++#include "vmlinux.h" ++ ++#undef hid_bpf_ctx ++#undef hid_report_type ++#undef hid_class_request ++#undef hid_bpf_attach_flags ++#undef HID_INPUT_REPORT ++#undef HID_OUTPUT_REPORT ++#undef HID_FEATURE_REPORT ++#undef HID_REPORT_TYPES ++#undef HID_REQ_GET_REPORT ++#undef HID_REQ_GET_IDLE ++#undef HID_REQ_GET_PROTOCOL ++#undef HID_REQ_SET_REPORT ++#undef HID_REQ_SET_IDLE ++#undef HID_REQ_SET_PROTOCOL ++#undef HID_BPF_FLAG_NONE ++#undef HID_BPF_FLAG_INSERT_HEAD ++#undef HID_BPF_FLAG_MAX ++ ++#include ++#include ++#include ++ ++enum hid_report_type { ++ HID_INPUT_REPORT = 0, ++ HID_OUTPUT_REPORT = 1, ++ HID_FEATURE_REPORT = 2, ++ ++ HID_REPORT_TYPES, ++}; ++ ++struct hid_bpf_ctx { ++ __u32 index; ++ const struct hid_device *hid; ++ __u32 allocated_size; ++ enum hid_report_type report_type; ++ union { ++ __s32 retval; ++ __s32 size; ++ }; ++} __attribute__((preserve_access_index)); ++ ++enum hid_class_request { ++ HID_REQ_GET_REPORT = 0x01, ++ HID_REQ_GET_IDLE = 0x02, ++ HID_REQ_GET_PROTOCOL = 0x03, ++ HID_REQ_SET_REPORT = 0x09, ++ HID_REQ_SET_IDLE = 0x0A, ++ HID_REQ_SET_PROTOCOL = 0x0B, ++}; ++ ++enum hid_bpf_attach_flags { ++ HID_BPF_FLAG_NONE = 0, ++ HID_BPF_FLAG_INSERT_HEAD = _BITUL(0), ++ HID_BPF_FLAG_MAX, ++}; ++ + /* following are kfuncs exported by HID for HID-BPF */ + extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, + unsigned int offset, +diff --git a/tools/testing/selftests/riscv/hwprobe/Makefile b/tools/testing/selftests/riscv/hwprobe/Makefile +index ebdbb3c22e54..f224b84591fb 100644 +--- a/tools/testing/selftests/riscv/hwprobe/Makefile ++++ b/tools/testing/selftests/riscv/hwprobe/Makefile +@@ -2,9 +2,14 @@ + # Copyright (C) 2021 ARM Limited + # Originally tools/testing/arm64/abi/Makefile + +-TEST_GEN_PROGS := hwprobe ++CFLAGS += -I$(top_srcdir)/tools/include ++ ++TEST_GEN_PROGS := hwprobe cbo + + include ../../lib.mk + + $(OUTPUT)/hwprobe: hwprobe.c sys_hwprobe.S +- $(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^ ++ $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ ++ ++$(OUTPUT)/cbo: 
cbo.c sys_hwprobe.S ++ $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ +diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c +new file mode 100644 +index 000000000000..50a2cc8aef38 +--- /dev/null ++++ b/tools/testing/selftests/riscv/hwprobe/cbo.c +@@ -0,0 +1,228 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (c) 2023 Ventana Micro Systems Inc. ++ * ++ * Run with 'taskset -c cbo' to only execute hwprobe on a ++ * subset of cpus, as well as only executing the tests on those cpus. ++ */ ++#define _GNU_SOURCE ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "hwprobe.h" ++#include "../../kselftest.h" ++ ++#define MK_CBO(fn) cpu_to_le32((fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15) ++ ++static char mem[4096] __aligned(4096) = { [0 ... 4095] = 0xa5 }; ++ ++static bool illegal_insn; ++ ++static void sigill_handler(int sig, siginfo_t *info, void *context) ++{ ++ unsigned long *regs = (unsigned long *)&((ucontext_t *)context)->uc_mcontext; ++ uint32_t insn = *(uint32_t *)regs[0]; ++ ++ assert(insn == MK_CBO(regs[11])); ++ ++ illegal_insn = true; ++ regs[0] += 4; ++} ++ ++static void cbo_insn(char *base, int fn) ++{ ++ uint32_t insn = MK_CBO(fn); ++ ++ asm volatile( ++ "mv a0, %0\n" ++ "li a1, %1\n" ++ ".4byte %2\n" ++ : : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory"); ++} ++ ++static void cbo_inval(char *base) { cbo_insn(base, 0); } ++static void cbo_clean(char *base) { cbo_insn(base, 1); } ++static void cbo_flush(char *base) { cbo_insn(base, 2); } ++static void cbo_zero(char *base) { cbo_insn(base, 4); } ++ ++static void test_no_zicbom(void *arg) ++{ ++ ksft_print_msg("Testing Zicbom instructions remain privileged\n"); ++ ++ illegal_insn = false; ++ cbo_clean(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.clean\n"); ++ ++ illegal_insn = false; ++ cbo_flush(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.flush\n"); ++ ++ illegal_insn = false; ++ cbo_inval(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.inval\n"); ++} ++ ++static void test_no_zicboz(void *arg) ++{ ++ ksft_print_msg("No Zicboz, testing cbo.zero remains privileged\n"); ++ ++ illegal_insn = false; ++ cbo_zero(&mem[0]); ++ ksft_test_result(illegal_insn, "No cbo.zero\n"); ++} ++ ++static bool is_power_of_2(__u64 n) ++{ ++ return n != 0 && (n & (n - 1)) == 0; ++} ++ ++static void test_zicboz(void *arg) ++{ ++ struct riscv_hwprobe pair = { ++ .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE, ++ }; ++ cpu_set_t *cpus = (cpu_set_t *)arg; ++ __u64 block_size; ++ int i, j; ++ long rc; ++ ++ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0); ++ block_size = pair.value; ++ ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE && ++ is_power_of_2(block_size), "Zicboz block size\n"); ++ ksft_print_msg("Zicboz block size: %ld\n", block_size); ++ ++ illegal_insn = false; ++ cbo_zero(&mem[block_size]); ++ ksft_test_result(!illegal_insn, "cbo.zero\n"); ++ ++ if (illegal_insn || !is_power_of_2(block_size)) { ++ ksft_test_result_skip("cbo.zero check\n"); ++ return; ++ } ++ ++ assert(block_size <= 1024); ++ ++ for (i = 0; i < 4096 / block_size; ++i) { ++ if (i % 2) ++ cbo_zero(&mem[i * block_size]); ++ } ++ ++ for (i = 0; i < 4096 / block_size; ++i) { ++ char expected = i % 2 ? 
0x0 : 0xa5; ++ ++ for (j = 0; j < block_size; ++j) { ++ if (mem[i * block_size + j] != expected) { ++ ksft_test_result_fail("cbo.zero check\n"); ++ ksft_print_msg("cbo.zero check: mem[%d] != 0x%x\n", ++ i * block_size + j, expected); ++ return; ++ } ++ } ++ } ++ ++ ksft_test_result_pass("cbo.zero check\n"); ++} ++ ++static void check_no_zicboz_cpus(cpu_set_t *cpus) ++{ ++ struct riscv_hwprobe pair = { ++ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, ++ }; ++ cpu_set_t one_cpu; ++ int i = 0, c = 0; ++ long rc; ++ ++ while (i++ < CPU_COUNT(cpus)) { ++ while (!CPU_ISSET(c, cpus)) ++ ++c; ++ ++ CPU_ZERO(&one_cpu); ++ CPU_SET(c, &one_cpu); ++ ++ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&one_cpu, 0); ++ assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0); ++ ++ if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) ++ ksft_exit_fail_msg("Zicboz is only present on a subset of harts.\n" ++ "Use taskset to select a set of harts where Zicboz\n" ++ "presence (present or not) is consistent for each hart\n"); ++ ++c; ++ } ++} ++ ++enum { ++ TEST_ZICBOZ, ++ TEST_NO_ZICBOZ, ++ TEST_NO_ZICBOM, ++}; ++ ++static struct test_info { ++ bool enabled; ++ unsigned int nr_tests; ++ void (*test_fn)(void *arg); ++} tests[] = { ++ [TEST_ZICBOZ] = { .nr_tests = 3, test_zicboz }, ++ [TEST_NO_ZICBOZ] = { .nr_tests = 1, test_no_zicboz }, ++ [TEST_NO_ZICBOM] = { .nr_tests = 3, test_no_zicbom }, ++}; ++ ++int main(int argc, char **argv) ++{ ++ struct sigaction act = { ++ .sa_sigaction = &sigill_handler, ++ .sa_flags = SA_SIGINFO, ++ }; ++ struct riscv_hwprobe pair; ++ unsigned int plan = 0; ++ cpu_set_t cpus; ++ long rc; ++ int i; ++ ++ if (argc > 1 && !strcmp(argv[1], "--sigill")) { ++ rc = sigaction(SIGILL, &act, NULL); ++ assert(rc == 0); ++ tests[TEST_NO_ZICBOZ].enabled = true; ++ tests[TEST_NO_ZICBOM].enabled = true; ++ } ++ ++ rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus); ++ assert(rc == 0); ++ ++ ksft_print_header(); ++ ++ pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0; ++ rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, 0); ++ if (rc < 0) ++ ksft_exit_fail_msg("hwprobe() failed with %d\n", rc); ++ assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0); ++ ++ if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) { ++ tests[TEST_ZICBOZ].enabled = true; ++ tests[TEST_NO_ZICBOZ].enabled = false; ++ } else { ++ check_no_zicboz_cpus(&cpus); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(tests); ++i) ++ plan += tests[i].enabled ? tests[i].nr_tests : 0; ++ ++ if (plan == 0) ++ ksft_print_msg("No tests enabled.\n"); ++ else ++ ksft_set_plan(plan); ++ ++ for (i = 0; i < ARRAY_SIZE(tests); ++i) { ++ if (tests[i].enabled) ++ tests[i].test_fn(&cpus); ++ } ++ ++ ksft_finished(); ++} +diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.c b/tools/testing/selftests/riscv/hwprobe/hwprobe.c +index 09f290a67420..d53e0889b59e 100644 +--- a/tools/testing/selftests/riscv/hwprobe/hwprobe.c ++++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.c +@@ -1,14 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0-only +-#include +-#include +- +-/* +- * Rather than relying on having a new enough libc to define this, just do it +- * ourselves. This way we don't need to be coupled to a new-enough libc to +- * contain the call. 
+- */ +-long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, unsigned int flags); ++#include "hwprobe.h" ++#include "../../kselftest.h" + + int main(int argc, char **argv) + { +@@ -16,6 +8,9 @@ int main(int argc, char **argv) + unsigned long cpus; + long out; + ++ ksft_print_header(); ++ ksft_set_plan(5); ++ + /* Fake the CPU_SET ops. */ + cpus = -1; + +@@ -25,13 +20,16 @@ int main(int argc, char **argv) + */ + for (long i = 0; i < 8; i++) + pairs[i].key = i; ++ + out = riscv_hwprobe(pairs, 8, 1, &cpus, 0); + if (out != 0) +- return -1; ++ ksft_exit_fail_msg("hwprobe() failed with %ld\n", out); ++ + for (long i = 0; i < 4; ++i) { + /* Fail if the kernel claims not to recognize a base key. */ + if ((i < 4) && (pairs[i].key != i)) +- return -2; ++ ksft_exit_fail_msg("Failed to recognize base key: key != i, " ++ "key=%ld, i=%ld\n", pairs[i].key, i); + + if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR) + continue; +@@ -39,52 +37,30 @@ int main(int argc, char **argv) + if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA) + continue; + +- return -3; ++ ksft_exit_fail_msg("Unexpected pair: (%ld, %ld)\n", pairs[i].key, pairs[i].value); + } + +- /* +- * This should also work with a NULL CPU set, but should not work +- * with an improperly supplied CPU set. +- */ + out = riscv_hwprobe(pairs, 8, 0, 0, 0); +- if (out != 0) +- return -4; ++ ksft_test_result(out == 0, "NULL CPU set\n"); + + out = riscv_hwprobe(pairs, 8, 0, &cpus, 0); +- if (out == 0) +- return -5; ++ ksft_test_result(out != 0, "Bad CPU set\n"); + + out = riscv_hwprobe(pairs, 8, 1, 0, 0); +- if (out == 0) +- return -6; ++ ksft_test_result(out != 0, "NULL CPU set with non-zero size\n"); + +- /* +- * Check that keys work by providing one that we know exists, and +- * checking to make sure the resultig pair is what we asked for. +- */ + pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR; + out = riscv_hwprobe(pairs, 1, 1, &cpus, 0); +- if (out != 0) +- return -7; +- if (pairs[0].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR) +- return -8; ++ ksft_test_result(out == 0 && pairs[0].key == RISCV_HWPROBE_KEY_BASE_BEHAVIOR, ++ "Existing key is maintained\n"); + +- /* +- * Check that an unknown key gets overwritten with -1, +- * but doesn't block elements after it. +- */ + pairs[0].key = 0x5555; + pairs[1].key = 1; + pairs[1].value = 0xAAAA; + out = riscv_hwprobe(pairs, 2, 0, 0, 0); +- if (out != 0) +- return -9; +- +- if (pairs[0].key != -1) +- return -10; +- +- if ((pairs[1].key != 1) || (pairs[1].value == 0xAAAA)) +- return -11; ++ ksft_test_result(out == 0 && pairs[0].key == -1 && ++ pairs[1].key == 1 && pairs[1].value != 0xAAAA, ++ "Unknown key overwritten with -1 and doesn't block other elements\n"); + +- return 0; ++ ksft_finished(); + } +diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.h b/tools/testing/selftests/riscv/hwprobe/hwprobe.h +new file mode 100644 +index 000000000000..e3fccb390c4d +--- /dev/null ++++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.h +@@ -0,0 +1,15 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++#ifndef SELFTEST_RISCV_HWPROBE_H ++#define SELFTEST_RISCV_HWPROBE_H ++#include ++#include ++ ++/* ++ * Rather than relying on having a new enough libc to define this, just do it ++ * ourselves. This way we don't need to be coupled to a new-enough libc to ++ * contain the call. 
++ */ ++long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, unsigned int flags); ++ ++#endif +diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c +index 8ad94e08ff4d..27668fb3b6d0 100644 +--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c ++++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c +@@ -1,20 +1,12 @@ + // SPDX-License-Identifier: GPL-2.0-only + #include + #include +-#include + #include + #include + ++#include "../hwprobe/hwprobe.h" + #include "../../kselftest.h" + +-/* +- * Rather than relying on having a new enough libc to define this, just do it +- * ourselves. This way we don't need to be coupled to a new-enough libc to +- * contain the call. +- */ +-long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, unsigned int flags); +- + #define NEXT_PROGRAM "./vstate_exec_nolibc" + static int launch_test(int test_inherit) + { -- 2.34.1 diff --git a/kernel.spec b/kernel.spec index e9e45154..c69991c1 100644 --- a/kernel.spec +++ b/kernel.spec @@ -42,7 +42,7 @@ rm -f test_openEuler_sign.ko test_openEuler_sign.ko.sig %global upstream_sublevel 0 %global devel_release 112 %global maintenance_release .0.0 -%global pkg_release .90 +%global pkg_release .91 %global openeuler_lts 1 %global openeuler_major 2403 @@ -1087,6 +1087,13 @@ fi %endif %changelog +* Fri Oct 17 2025 Mingzheng Xing - 6.6.0-112.0.0.91 +- RISC-V kernel upgrade to 6.6.0-112.0.0 +- Sync patches from the rvck-olk repository, including: + ACPI, IOMMU drivers; + Key RISC-V extensions support; + Add support for K1, SG2044, and DP1000. + * Wed Oct 15 2025 Li Nan - 6.6.0-112.0.0.90 - !18320 workqueue: Fix kabi broken of enum WORK_OFFQ_POOL_SHIFT - !18336 soc cache: support L3 cache lock in framework -- Gitee